X86ISelDAGToDAG.cpp revision 37ed9c199ca639565f6ce88105f9e39e898d82d0
//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <stdint.h>
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValue's instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    int JT;
    unsigned Align;    // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
        JT(-1), Align(0), SymbolFlags(X86II::MO_NO_FLAG) {
    }

    bool hasSymbolicDisplacement() const {
      return GV != nullptr || CP != nullptr || ES != nullptr ||
             JT != -1 || BlockAddr != nullptr;
    }

    bool hasBaseOrIndexReg() const {
      return BaseType == FrameIndexBase ||
             IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
    }
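    // Illustrative note (added): a fully populated mode corresponds to the
    // x86 operand form Segment:[Base + Scale*Index + Disp]. For example,
    // Scale = 4, Disp = 8 with the GS segment selects the operand
    //   movl %gs:8(%ebx,%ecx,4), %eax
    // Only a subset of the fields is typically filled in by the matchers
    // below.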
    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode())
        Base_Reg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
             << " Scale" << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode())
        IndexReg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Align << '\n';
    }
#endif
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel final : public SelectionDAGISel {
    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    const char *getPassName() const override {
      return "X86 DAG->DAG Instruction Selection";
    }

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Reset the subtarget each time through.
      Subtarget = &TM.getSubtarget<X86Subtarget>();
      SelectionDAGISel::runOnMachineFunction(MF);
      return true;
    }

    void EmitFunctionEntryCode() override;

    bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;

    void PreprocessISelDAG() override;

    inline bool immSext8(SDNode *N) const {
      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    }

    // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
    // sign extended field.
    inline bool i64immSExt32(SDNode *N) const {
      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
      return (int64_t)v == (int32_t)v;
    }
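    // Illustrative example (added): 0xFFFFFFFF80000000 passes i64immSExt32,
    // because sign-extending its low 32 bits (0x80000000) reproduces the
    // full 64-bit value, whereas 0x0000000080000000 fails the predicate.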
// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    SDNode *Select(SDNode *N) override;
    SDNode *SelectGather(SDNode *N, unsigned Opc);
    SDNode *SelectAtomicLoadArith(SDNode *Node, MVT NVT);

    bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool SelectMOV64Imm32(SDValue N, SDValue &Imm);
    bool SelectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool SelectLEA64_32Addr(SDValue N, SDValue &Base,
                            SDValue &Scale, SDValue &Index, SDValue &Disp,
                            SDValue &Segment);
    bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool SelectScalarSSELoad(SDNode *Root, SDValue N,
                             SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &NodeWithChain);

    bool TryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                      char ConstraintCode,
                                      std::vector<SDValue> &OutOps) override;

    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);

    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                   SDValue &Scale, SDValue &Index,
                                   SDValue &Disp, SDValue &Segment) {
      Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
                 ? CurDAG->getTargetFrameIndex(AM.Base_FrameIndex,
                                               TLI->getPointerTy())
                 : AM.Base_Reg;
      Scale = getI8Imm(AM.Scale);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES) {
        assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      } else if (AM.JT != -1) {
        assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      } else if (AM.BlockAddr)
        Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                             AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }

    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }
    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() const {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() const {
      return getTargetMachine().getSubtargetImpl()->getInstrInfo();
    }

    /// \brief Address-mode matching performs shift-of-and to and-of-shift
    /// reassociation in order to expose more scaled addressing
    /// opportunities.
    bool ComplexPatternFuncMutatesDAG() const override {
      return true;
    }
  };
}


bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::SUB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl %gs:0, %eax
      // leal i@NTPOFF(%eax), %eax
      // instead of
      // movl $i@NTPOFF, %eax
      // addl %gs:0, %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}
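// Note (added for clarity): IsProfitableToFold returns true when folding is
// desirable. By contrast, the MatchAddress/MatchWrapper/FoldOffsetIntoAddress
// helpers below follow the opposite convention and return true on *failure*,
// while the top-level Select*Addr ComplexPattern hooks once again return true
// on success.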
/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    Ops.push_back(OrigChain.getOperand(i));
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  unsigned NumOps = Call.getNode()->getNumOperands();
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = NumOps; i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}

/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain; this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target doesn't favor register indirect
        // calls.
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->callRegIndirect()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          // Only do this if the load can be folded into TC_RETURN.
          (Subtarget->is64Bit() ||
           getTargetMachine().getRelocationModel() != Reloc::PIC_)))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack into a store
    // and a load to the stack. This is a gross hack. We would like to simply
    // mark these as being illegal, but when we do that, legalize produces
    // these when it expands calls, then expands these in the same legalize
    // pass. We would like dag combine to be able to hack on these between the
    // call expansion and the node legalization. As such this pass basically
    // does "really late" legalization of these inline with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    MVT SrcVT = N->getOperand(0).getSimpleValueType();
    MVT DstVT = N->getSimpleValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    const X86TargetLowering *X86Lowering =
        static_cast<const X86TargetLowering *>(TLI);
    bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    MVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

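    // Illustrative example (added): for an f80 -> f32 FP_ROUND whose result
    // lives in an SSE register, MemVT is f32, so the code below emits a
    // 4-byte truncating store from the x87 stack followed by an f32 load,
    // roughly:
    //   fstps  -4(%rsp)
    //   movss  -4(%rsp), %xmm0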
    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    SDLoc dl(N);

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, MachinePointerInfo(), MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(),
                                        MemVT, false, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}


/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
  if (Subtarget->isTargetCygMing()) {
    unsigned CallOp =
      Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
    BuildMI(BB, DebugLoc(),
            TII->get(CallOp)).addExternalSymbol("__main");
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}

bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}

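// Illustrative example (added): if AM.Disp is already 0x7fffff00 and we try
// to fold a further offset of 0x200, the sum no longer fits the signed
// 32-bit displacement field, so the helper reports failure (returns true)
// and the caller must keep the addition as explicit arithmetic.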
bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
        Subtarget->isTargetLinux())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      }

  return true;
}

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode. These wrap things that will resolve down into a
/// symbol reference. If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses. We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    // Base and index reg must be 0 in order to use %rip as base.
    if (AM.hasBaseOrIndexReg())
      return true;
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.BlockAddr = BA->getBlockAddress();
      AM.SymbolFlags = BA->getTargetFlags();
      if (FoldOffsetIntoAddress(BA->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else
      llvm_unreachable("Unhandled symbol reference node.");

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

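  // Illustrative example (added): with a small-code-model WrapperRIP around
  // a global `g` plus offset 4, the block above produces the operand
  //   movl g+4(%rip), %eax
  // rather than materializing the address in a register first.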
  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -mcmodel=small mode. In 64-bit
  // mode, this only applies to a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      M == CodeModel::Small || M == CodeModel::Kernel) {
    assert(N.getOpcode() != X86ISD::WrapperRIP &&
           "RIP-relative addressing already handled");
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      AM.BlockAddr = BA->getBlockAddress();
      AM.Disp += BA->getOffset();
      AM.SymbolFlags = BA->getTargetFlags();
    } else
      llvm_unreachable("Unhandled symbol reference node.");
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr &&
      AM.IndexReg.getNode() == nullptr &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}

// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
    DAG.RepositionNode(Pos.getNode(), N.getNode());
    N.getNode()->setNodeId(Pos.getNode()->getNodeId());
  }
}

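// Background for the next transform (added, illustrative): on x86 the second
// byte of a GPR is addressable as an h-register, so ((x >> 8) & 0xff) can be
// materialized with a single movzbl-from-%ah style extract. Rewriting the
// mask-and-shift into that shape plus a scaled index yields roughly
//   movzbl %ah, %ecx
//   movl   (%rsi,%rcx,4), %eax
// instead of a separate shift and mask.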
// Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
// safe. This allows us to convert the shift and and into an h-register
// extract and a scaled index. Returns false if the simplification is
// performed.
static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue Eight = DAG.getConstant(8, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, Eight);
  InsertDAGNode(DAG, N, Srl);
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, And);
  InsertDAGNode(DAG, N, ShlCount);
  InsertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}

// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // isel mechanism requires their node ids to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, NewAnd);
  InsertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}

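// Worked example for the transform above (added, illustrative): for
// (x << 2) & 0x3c, the expression is rewritten as (x & 0xf) << 2, so the AND
// becomes the index computation and the shift-by-2 is absorbed into the
// addressing mode as Scale = 4, e.g. an operand of the form (%rsi,%rcx,4).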
// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %rcx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = countLeadingZeros(Mask);
  unsigned MaskTZ = countTrailingZeros(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that the mask is a contiguous run of bits.
  if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  MaskLZ -= (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
                          X.getOperand(0).getSimpleValueType().getSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits =
    APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
  APInt KnownZero, KnownOne;
  DAG.computeKnownBits(X, KnownZero, KnownOne);
  if (MaskedHighBits != KnownZero) return true;

  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  MVT VT = N.getSimpleValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
    InsertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  SDLoc DL(N);
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewSRLAmt);
  InsertDAGNode(DAG, N, NewSRL);
  InsertDAGNode(DAG, N, NewSHLAmt);
  InsertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}

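// Note (added): the three Fold* helpers above all funnel the folded shift
// amount into AM.Scale, which must be one of the SIB-encodable scales 1, 2,
// 4, or 8 -- hence the recurring "1, 2, or 3 bits" checks on shift amounts.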
bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  SDLoc dl(N);
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!FoldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
          if (!FoldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;

  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getSimpleValueType().getSizeInBits() > 64) break;

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it down.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        AM.IndexReg.getNode() == nullptr) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (FoldOffsetIntoAddress(Disp, AM))
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address (with the
    // index field unused), use -B as the index. This is a win if A has
    // multiple parts that can be folded into the address. Also, this saves
    // a mov if the base register has other uses, since it avoids a
    // two-address sub instruction; however, it costs an additional mov if
    // the index register has other uses.
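    // Illustrative example (added): for `&Table[0] - x`, where Table folds
    // into the displacement, the match below yields roughly
    //   negl  %ecx
    //   movl  Table(,%rcx), %eax
    // i.e. the negated RHS becomes the index register with scale 1.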

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    InsertDAGNode(*CurDAG, N, Zero);
    InsertDAGNode(*CurDAG, N, Neg);
    return false;
  }

  case ISD::ADD: {
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;

    // Try again after commuting the operands.
    if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base_Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      N = Handle.getValue();
      AM.Base_Reg = N.getOperand(0);
      AM.IndexReg = N.getOperand(1);
      AM.Scale = 1;
      return false;
    }
    N = Handle.getValue();
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
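    // Illustrative example (added): (x << 4) | 5 can be matched as
    // 5(,%reg,16) because the low four bits of (x << 4) are known zero,
    // making OR and ADD equivalent here.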
    if (CurDAG->isBaseWithConstantOffset(N)) {
      X86ISelAddressMode Backup = AM;
      ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));

      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
          !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
        return false;
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
    SDValue X = Shift.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getSimpleValueType().getSizeInBits() > 64) break;

    if (!isa<ConstantSDNode>(N.getOperand(1)))
      break;
    uint64_t Mask = N.getConstantOperandVal(1);

    // Try to fold the mask and shift into an extract and scale.
    if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to fold the mask and shift directly into the scale.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to swap the mask and shift to place shifts which can be done as
    // a scale on the outside of the mask.
    if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
      return false;
    break;
  }
  }

  return MatchAddressBase(N, AM);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (!AM.IndexReg.getNode()) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}

/// SelectAddr - Returns true if it is able to pattern-match an addressing
/// mode. It returns the operands which make up the maximal addressing mode it
/// can match by reference.
///
/// Parent is the parent node of the addr operand that is being matched. It
/// is always a load, store, atomic node, or null. It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // This list of opcodes are all the nodes that have an "addr:$ptr" operand
      // that are not a MemSDNode, and thus don't have proper addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
      Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
      Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS.
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  }

  if (MatchAddress(N, AM))
    return false;

  MVT VT = N.getSimpleValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

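// Note (added): SelectAddr is a ComplexPattern hook, so unlike the Match*
// helpers it returns true on success; unmatched base/index slots are filled
// with register 0, which is treated as "no register".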
/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternChainNode: this is the matched node that has a chain input and
///   output.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        PatternNodeWithChain.hasOneUse() &&
        IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
        IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse() &&
      IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
      IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
    // Okay, this is a zero extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    PatternNodeWithChain = SDValue(LD, 0);
    return true;
  }
  return false;
}


bool X86DAGToDAGISel::SelectMOV64Imm32(SDValue N, SDValue &Imm) {
  if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    uint64_t ImmVal = CN->getZExtValue();
    if ((uint32_t)ImmVal != (uint64_t)ImmVal)
      return false;

    Imm = CurDAG->getTargetConstant(ImmVal, MVT::i64);
    return true;
  }

  // In static codegen with small code model, we can get the address of a label
  // into a register with 'movl'. TableGen has already made sure we're looking
  // at a label of some kind.
  assert(N->getOpcode() == X86ISD::Wrapper &&
         "Unexpected node type for MOV32ri64");
  N = N.getOperand(0);

  if (N->getOpcode() != ISD::TargetConstantPool &&
      N->getOpcode() != ISD::TargetJumpTable &&
      N->getOpcode() != ISD::TargetGlobalAddress &&
      N->getOpcode() != ISD::TargetExternalSymbol &&
      N->getOpcode() != ISD::TargetBlockAddress)
    return false;

  Imm = N;
  return TM.getCodeModel() == CodeModel::Small;
}

bool X86DAGToDAGISel::SelectLEA64_32Addr(SDValue N, SDValue &Base,
                                         SDValue &Scale, SDValue &Index,
                                         SDValue &Disp, SDValue &Segment) {
  if (!SelectLEAAddr(N, Base, Scale, Index, Disp, Segment))
    return false;

  SDLoc DL(N);
  RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
  if (RN && RN->getReg() == 0)
    Base = CurDAG->getRegister(0, MVT::i64);
  else if (Base.getValueType() == MVT::i32 && !dyn_cast<FrameIndexSDNode>(Base)) {
    // Base could already be %rip, particularly in the x32 ABI.
    Base = SDValue(CurDAG->getMachineNode(
                       TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
                       CurDAG->getTargetConstant(0, MVT::i64),
                       Base,
                       CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
                   0);
  }

  RN = dyn_cast<RegisterSDNode>(Index);
  if (RN && RN->getReg() == 0)
    Index = CurDAG->getRegister(0, MVT::i64);
  else {
    assert(Index.getValueType() == MVT::i32 &&
           "Expect to be extending 32-bit registers for use in LEA");
    Index = SDValue(CurDAG->getMachineNode(
                        TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
                        CurDAG->getTargetConstant(0, MVT::i64),
                        Index,
                        CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
                    0);
  }

  return true;
}

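// Illustrative note (added): SelectLEA64_32Addr handles an LEA that computes
// a 32-bit value from 64-bit address components (e.g. under the x32 ABI),
// roughly
//   leal (%rdi,%rsi), %eax
// The 32-bit base/index values are wrapped in SUBREG_TO_REG above so they
// can stand in for the 64-bit registers the addressing mode requires.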
/// SelectLEAAddr - It calls SelectAddr and determines if the maximal
/// addressing mode it matches can be cost-effectively emitted as an LEA
/// instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert(T == AM.Segment);
  AM.Segment = Copy;

  MVT VT = N.getSimpleValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base_Reg.getNode())
      Complexity = 1;
    else
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}


bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P, OptLevel))
    return false;

  return SelectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}

///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy()).getNode();
}

/// Atomic opcode table
///
enum AtomicOpc {
  ADD,
  SUB,
  INC,
  DEC,
  OR,
  AND,
  XOR,
  AtomicOpcEnd
};

enum AtomicSz {
  ConstantI8,
  I8,
  SextConstantI16,
  ConstantI16,
  I16,
  SextConstantI32,
  ConstantI32,
  I32,
  SextConstantI64,
  ConstantI64,
  I64,
  AtomicSzEnd
};

// Indexed as AtomicOpcTbl[AtomicOpc][AtomicSz]. A zero entry marks a
// combination with no corresponding instruction (INC/DEC take no source
// operand, so only their plain register-width entries are populated).
static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
  {
    X86::LOCK_ADD8mi,
    X86::LOCK_ADD8mr,
    X86::LOCK_ADD16mi8,
    X86::LOCK_ADD16mi,
    X86::LOCK_ADD16mr,
    X86::LOCK_ADD32mi8,
    X86::LOCK_ADD32mi,
    X86::LOCK_ADD32mr,
    X86::LOCK_ADD64mi8,
    X86::LOCK_ADD64mi32,
    X86::LOCK_ADD64mr,
  },
  {
    X86::LOCK_SUB8mi,
    X86::LOCK_SUB8mr,
    X86::LOCK_SUB16mi8,
    X86::LOCK_SUB16mi,
    X86::LOCK_SUB16mr,
    X86::LOCK_SUB32mi8,
    X86::LOCK_SUB32mi,
    X86::LOCK_SUB32mr,
    X86::LOCK_SUB64mi8,
    X86::LOCK_SUB64mi32,
    X86::LOCK_SUB64mr,
  },
  {
    0,
    X86::LOCK_INC8m,
    0,
    0,
    X86::LOCK_INC16m,
    0,
    0,
    X86::LOCK_INC32m,
    0,
    0,
    X86::LOCK_INC64m,
  },
  {
    0,
    X86::LOCK_DEC8m,
    0,
    0,
    X86::LOCK_DEC16m,
    0,
    0,
    X86::LOCK_DEC32m,
    0,
    0,
    X86::LOCK_DEC64m,
  },
  {
    X86::LOCK_OR8mi,
    X86::LOCK_OR8mr,
    X86::LOCK_OR16mi8,
    X86::LOCK_OR16mi,
    X86::LOCK_OR16mr,
    X86::LOCK_OR32mi8,
    X86::LOCK_OR32mi,
    X86::LOCK_OR32mr,
    X86::LOCK_OR64mi8,
    X86::LOCK_OR64mi32,
    X86::LOCK_OR64mr,
  },
  {
    X86::LOCK_AND8mi,
    X86::LOCK_AND8mr,
    X86::LOCK_AND16mi8,
    X86::LOCK_AND16mi,
    X86::LOCK_AND16mr,
    X86::LOCK_AND32mi8,
    X86::LOCK_AND32mi,
    X86::LOCK_AND32mr,
    X86::LOCK_AND64mi8,
    X86::LOCK_AND64mi32,
    X86::LOCK_AND64mr,
  },
  {
    X86::LOCK_XOR8mi,
    X86::LOCK_XOR8mr,
    X86::LOCK_XOR16mi8,
    X86::LOCK_XOR16mi,
    X86::LOCK_XOR16mr,
    X86::LOCK_XOR32mi8,
    X86::LOCK_XOR32mi,
    X86::LOCK_XOR32mr,
    X86::LOCK_XOR64mi8,
    X86::LOCK_XOR64mi32,
    X86::LOCK_XOR64mr,
  }
};

// Return the target constant operand for atomic-load-op and do simple
// translations, such as from atomic-load-add to lock-sub. The return value is
// one of the following 3 cases:
// + target-constant, the operand can be supported as a target constant.
// + empty, the operand is no longer needed with the new op selected.
// + non-empty, otherwise.
static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
                                                SDLoc dl,
                                                enum AtomicOpc &Op, MVT NVT,
                                                SDValue Val,
                                                const X86Subtarget *Subtarget) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) {
    int64_t CNVal = CN->getSExtValue();
    // Quit if not a 32-bit imm.
    if ((int32_t)CNVal != CNVal)
      return Val;
    // Quit if INT32_MIN: it would be negated as it is negative and overflow,
    // producing an immediate that does not fit in the 32 bits available for
    // an immediate operand to sub. However, it still fits in 32 bits for the
    // add (since it is not negated) so we can return target-constant.
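    // Concretely: rewriting the add as lock-sub would require negating
    // INT32_MIN to 0x80000000, which is not representable as a sign-extended
    // 32-bit immediate, whereas lock-add can encode INT32_MIN as-is.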
    if (CNVal == INT32_MIN)
      return CurDAG->getTargetConstant(CNVal, NVT);
    // For atomic-load-add, we could do some optimizations.
    if (Op == ADD) {
      // Translate to INC/DEC if ADD by 1 or -1.
      if (((CNVal == 1) || (CNVal == -1)) && !Subtarget->slowIncDec()) {
        Op = (CNVal == 1) ? INC : DEC;
        // No more constant operand after being translated into INC/DEC.
        return SDValue();
      }
      // Translate to SUB if ADD by negative value.
      if (CNVal < 0) {
        Op = SUB;
        CNVal = -CNVal;
      }
    }
    return CurDAG->getTargetConstant(CNVal, NVT);
  }

  // If the value operand is single-used, try to optimize it.
  if (Op == ADD && Val.hasOneUse()) {
    // Translate (atomic-load-add ptr (sub 0 x)) back to (lock-sub x).
    if (Val.getOpcode() == ISD::SUB && X86::isZeroNode(Val.getOperand(0))) {
      Op = SUB;
      return Val.getOperand(1);
    }
    // A special case for i16, which needs truncating as, in most cases, it's
    // promoted to i32. We will translate
    // (atomic-load-add (truncate (sub 0 x))) to (lock-sub (EXTRACT_SUBREG x)).
    if (Val.getOpcode() == ISD::TRUNCATE && NVT == MVT::i16 &&
        Val.getOperand(0).getOpcode() == ISD::SUB &&
        X86::isZeroNode(Val.getOperand(0).getOperand(0))) {
      Op = SUB;
      Val = Val.getOperand(0);
      return CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl, NVT,
                                            Val.getOperand(1));
    }
  }

  return Val;
}

SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, MVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return nullptr;

  SDLoc dl(Node);

  // Optimize common patterns for __sync_or_and_fetch and similar arith
  // operations where the result is not used. This allows us to use the "lock"
  // version of the arithmetic instruction.
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Base, Scale, Index, Disp, Segment;
  if (!SelectAddr(Node, Ptr, Base, Scale, Index, Disp, Segment))
    return nullptr;

  // Which index into the table.
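  // For example, an i32 atomic-load-add of the constant -1 becomes Op == DEC
  // in getAtomicLoadArithTargetConstant below (when INC/DEC are not slow on
  // this subtarget), and AtomicOpcTbl[DEC][I32] then yields X86::LOCK_DEC32m.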
  enum AtomicOpc Op;
  switch (Node->getOpcode()) {
  default:
    return nullptr;
  case ISD::ATOMIC_LOAD_OR:
    Op = OR;
    break;
  case ISD::ATOMIC_LOAD_AND:
    Op = AND;
    break;
  case ISD::ATOMIC_LOAD_XOR:
    Op = XOR;
    break;
  case ISD::ATOMIC_LOAD_ADD:
    Op = ADD;
    break;
  }

  Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val, Subtarget);
  bool isUnOp = !Val.getNode();
  bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant);

  unsigned Opc = 0;
  switch (NVT.SimpleTy) {
  default: return nullptr;
  case MVT::i8:
    if (isCN)
      Opc = AtomicOpcTbl[Op][ConstantI8];
    else
      Opc = AtomicOpcTbl[Op][I8];
    break;
  case MVT::i16:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI16];
      else
        Opc = AtomicOpcTbl[Op][ConstantI16];
    } else
      Opc = AtomicOpcTbl[Op][I16];
    break;
  case MVT::i32:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI32];
      else
        Opc = AtomicOpcTbl[Op][ConstantI32];
    } else
      Opc = AtomicOpcTbl[Op][I32];
    break;
  case MVT::i64:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI64];
      else if (i64immSExt32(Val.getNode()))
        Opc = AtomicOpcTbl[Op][ConstantI64];
      else
        llvm_unreachable("True 64 bits constant in SelectAtomicLoadArith");
    } else
      Opc = AtomicOpcTbl[Op][I64];
    break;
  }

  assert(Opc != 0 && "Invalid arith lock transform!");

  // Building the new node.
  SDValue Ret;
  if (isUnOp) {
    SDValue Ops[] = { Base, Scale, Index, Disp, Segment, Chain };
    Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0);
  } else {
    SDValue Ops[] = { Base, Scale, Index, Disp, Segment, Val, Chain };
    Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0);
  }

  // Copying the MachineMemOperand.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);

  // We need to have two outputs as that is what the original instruction had.
  // So we add a dummy, undefined output. This is safe as we checked first
  // that no one uses our output anyway.
  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  SDValue RetVals[] = { Undef, Ret };
  return CurDAG->getMergeValues(RetVals, dl).getNode();
}

/// HasNoSignedComparisonUses - Return true if the given X86ISD::CMP node has
/// no uses which require the SF or OF bits to be accurate.
static bool HasNoSignedComparisonUses(SDNode *N) {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = N->use_begin(),
         UE = N->use_end(); UI != UE; ++UI) {
    // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      return false;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
          X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
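      // A user that has not yet been selected to a machine opcode gives us no
      // way to tell which EFLAGS bits it will read, so bail out.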
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the opcode of the user.
      switch (FlagUI->getMachineOpcode()) {
      // These comparisons don't treat the most significant bit specially.
      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
      case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
      case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
      case X86::CMOVA16rr: case X86::CMOVA16rm:
      case X86::CMOVA32rr: case X86::CMOVA32rm:
      case X86::CMOVA64rr: case X86::CMOVA64rm:
      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
      case X86::CMOVB16rr: case X86::CMOVB16rm:
      case X86::CMOVB32rr: case X86::CMOVB32rm:
      case X86::CMOVB64rr: case X86::CMOVB64rm:
      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
      case X86::CMOVE16rr: case X86::CMOVE16rm:
      case X86::CMOVE32rr: case X86::CMOVE32rm:
      case X86::CMOVE64rr: case X86::CMOVE64rm:
      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
      case X86::CMOVP16rr: case X86::CMOVP16rm:
      case X86::CMOVP32rr: case X86::CMOVP32rm:
      case X86::CMOVP64rr: case X86::CMOVP64rm:
        continue;
      // Anything else: assume conservatively.
      default: return false;
      }
    }
  }
  return true;
}

/// isLoadIncOrDecStore - Check whether the chain ending in StoreNode is a
/// suitable {load; increment or decrement; store} sequence for fusing into
/// an in-memory increment or decrement.
static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
                                SDValue StoredVal, SelectionDAG *CurDAG,
                                LoadSDNode *&LoadNode, SDValue &InputChain) {

  // Is the value stored the result of a DEC or INC?
  if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;

  // Is the stored value result 0 of the load?
  if (StoredVal.getResNo() != 0) return false;

  // Are there other uses of the loaded value than the inc or dec?
  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;

  // Is the store non-extending and non-indexed?
  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
    return false;

  SDValue Load = StoredVal->getOperand(0);
  // Is the stored value a non-extending and non-indexed load?
  if (!ISD::isNormalLoad(Load.getNode())) return false;

  // Return LoadNode by reference.
  LoadNode = cast<LoadSDNode>(Load);
  // Is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
  EVT LdVT = LoadNode->getMemoryVT();
  if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
      LdVT != MVT::i8)
    return false;

  // Is the store the only read of the loaded value?
  if (!Load.hasOneUse())
    return false;

  // Is the address of the store the same as the load?
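  // Both the base pointer and the offset must match; otherwise the store is
  // not writing back to the location that was loaded from.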
  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
      LoadNode->getOffset() != StoreNode->getOffset())
    return false;

  // Check if the chain is produced by the load or is a TokenFactor with
  // the load output chain as an operand. Return InputChain by reference.
  SDValue Chain = StoreNode->getChain();

  bool ChainCheck = false;
  if (Chain == Load.getValue(1)) {
    ChainCheck = true;
    InputChain = LoadNode->getChain();
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    SmallVector<SDValue, 4> ChainOps;
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
      SDValue Op = Chain.getOperand(i);
      if (Op == Load.getValue(1)) {
        ChainCheck = true;
        continue;
      }

      // Make sure using Op as part of the chain would not cause a cycle here.
      // In theory, we could check whether the chain node is a predecessor of
      // the load. But that can be very expensive. Instead visit the uses and
      // make sure they all have a smaller node id than the load.
      int LoadId = LoadNode->getNodeId();
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        if (UI.getUse().getResNo() != 0)
          continue;
        if (UI->getNodeId() > LoadId)
          return false;
      }

      ChainOps.push_back(Op);
    }

    if (ChainCheck)
      // Make a new TokenFactor with all the other input chains except
      // for the load.
      InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
                                   MVT::Other, ChainOps);
  }
  if (!ChainCheck)
    return false;

  return true;
}

/// getFusedLdStOpcode - Get the appropriate X86 opcode for an in-memory
/// increment or decrement. Opc should be X86ISD::DEC or X86ISD::INC.
static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
  if (Opc == X86ISD::DEC) {
    if (LdVT == MVT::i64) return X86::DEC64m;
    if (LdVT == MVT::i32) return X86::DEC32m;
    if (LdVT == MVT::i16) return X86::DEC16m;
    if (LdVT == MVT::i8)  return X86::DEC8m;
  } else {
    assert(Opc == X86ISD::INC && "unrecognized opcode");
    if (LdVT == MVT::i64) return X86::INC64m;
    if (LdVT == MVT::i32) return X86::INC32m;
    if (LdVT == MVT::i16) return X86::INC16m;
    if (LdVT == MVT::i8)  return X86::INC8m;
  }
  llvm_unreachable("unrecognized size for LdVT");
}

/// SelectGather - Customized ISel for GATHER operations.
///
SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) {
  // Operands of Gather: VSrc, Base, VIdx, VMask, Scale
  SDValue Chain = Node->getOperand(0);
  SDValue VSrc = Node->getOperand(2);
  SDValue Base = Node->getOperand(3);
  SDValue VIdx = Node->getOperand(4);
  SDValue VMask = Node->getOperand(5);
  ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
  if (!Scale)
    return nullptr;

  SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
                                   MVT::Other);

  // Memory Operands: Base, Scale, Index, Disp, Segment
  SDValue Disp = CurDAG->getTargetConstant(0, MVT::i32);
  SDValue Segment = CurDAG->getRegister(0, MVT::i32);
  const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx,
                          Disp, Segment, VMask, Chain};
  SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node), VTs, Ops);
  // Node has 2 outputs: VDst and MVT::Other.
  // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other.
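  // VMask_wb (the written-back mask) has no counterpart on the intrinsic node
  // and is simply left unused.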
  // We replace VDst of Node with VDst of ResNode, and Other of Node with Other
  // of ResNode.
  ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
  ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2));
  return ResNode;
}

SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
  MVT NVT = Node->getSimpleValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  SDLoc dl(Node);

  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');

  if (Node->isMachineOpcode()) {
    DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
    Node->setNodeId(-1);
    return nullptr;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default: break;
    case Intrinsic::x86_avx2_gather_d_pd:
    case Intrinsic::x86_avx2_gather_d_pd_256:
    case Intrinsic::x86_avx2_gather_q_pd:
    case Intrinsic::x86_avx2_gather_q_pd_256:
    case Intrinsic::x86_avx2_gather_d_ps:
    case Intrinsic::x86_avx2_gather_d_ps_256:
    case Intrinsic::x86_avx2_gather_q_ps:
    case Intrinsic::x86_avx2_gather_q_ps_256:
    case Intrinsic::x86_avx2_gather_d_q:
    case Intrinsic::x86_avx2_gather_d_q_256:
    case Intrinsic::x86_avx2_gather_q_q:
    case Intrinsic::x86_avx2_gather_q_q_256:
    case Intrinsic::x86_avx2_gather_d_d:
    case Intrinsic::x86_avx2_gather_d_d_256:
    case Intrinsic::x86_avx2_gather_q_d:
    case Intrinsic::x86_avx2_gather_q_d_256: {
      if (!Subtarget->hasAVX2())
        break;
      unsigned Opc;
      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic");
      case Intrinsic::x86_avx2_gather_d_pd:     Opc = X86::VGATHERDPDrm;  break;
      case Intrinsic::x86_avx2_gather_d_pd_256: Opc = X86::VGATHERDPDYrm; break;
      case Intrinsic::x86_avx2_gather_q_pd:     Opc = X86::VGATHERQPDrm;  break;
      case Intrinsic::x86_avx2_gather_q_pd_256: Opc = X86::VGATHERQPDYrm; break;
      case Intrinsic::x86_avx2_gather_d_ps:     Opc = X86::VGATHERDPSrm;  break;
      case Intrinsic::x86_avx2_gather_d_ps_256: Opc = X86::VGATHERDPSYrm; break;
      case Intrinsic::x86_avx2_gather_q_ps:     Opc = X86::VGATHERQPSrm;  break;
      case Intrinsic::x86_avx2_gather_q_ps_256: Opc = X86::VGATHERQPSYrm; break;
      case Intrinsic::x86_avx2_gather_d_q:      Opc = X86::VPGATHERDQrm;  break;
      case Intrinsic::x86_avx2_gather_d_q_256:  Opc = X86::VPGATHERDQYrm; break;
      case Intrinsic::x86_avx2_gather_q_q:      Opc = X86::VPGATHERQQrm;  break;
      case Intrinsic::x86_avx2_gather_q_q_256:  Opc = X86::VPGATHERQQYrm; break;
      case Intrinsic::x86_avx2_gather_d_d:      Opc = X86::VPGATHERDDrm;  break;
      case Intrinsic::x86_avx2_gather_d_d_256:  Opc = X86::VPGATHERDDYrm; break;
      case Intrinsic::x86_avx2_gather_q_d:      Opc = X86::VPGATHERQDrm;  break;
      case Intrinsic::x86_avx2_gather_q_d_256:  Opc = X86::VPGATHERQDYrm; break;
      }
      SDNode *RetVal = SelectGather(Node, Opc);
      if (RetVal)
        // We already called ReplaceUses inside SelectGather.
        return nullptr;
      break;
    }
    }
    break;
  }
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case X86ISD::SHRUNKBLEND: {
    // SHRUNKBLEND selects like a regular VSELECT.
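    // Rebuild it as a plain ISD::VSELECT and feed it back through the
    // table-generated matcher via SelectCode below.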
    SDValue VSelect = CurDAG->getNode(
        ISD::VSELECT, SDLoc(Node), Node->getValueType(0), Node->getOperand(0),
        Node->getOperand(1), Node->getOperand(2));
    ReplaceUses(SDValue(Node, 0), VSelect);
    SelectCode(VSelect.getNode());
    // We already called ReplaceUses.
    return nullptr;
  }

  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_ADD: {
    SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    // For operations of the form (x << C1) op C2, check if we can use a
    // smaller encoding for C2 by transforming it into (x op (C2 >> C1)) << C1.
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
      break;

    // i8 is unshrinkable, i16 should be promoted to i32.
    if (NVT != MVT::i32 && NVT != MVT::i64)
      break;

    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
    ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!Cst || !ShlCst)
      break;

    int64_t Val = Cst->getSExtValue();
    uint64_t ShlVal = ShlCst->getZExtValue();

    // Make sure that we don't change the operation by removing bits.
    // This only matters for OR and XOR; AND is unaffected.
    uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1;
    if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
      break;

    unsigned ShlOp, Op;
    MVT CstVT = NVT;

    // Check the minimum bitwidth for the new constant.
    // TODO: AND32ri is the same as AND64ri32 with zext imm.
    // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
    // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
    if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
      CstVT = MVT::i8;
    else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
      CstVT = MVT::i32;

    // Bail if there is no smaller encoding.
    if (NVT == CstVT)
      break;

    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i32:
      assert(CstVT == MVT::i8);
      ShlOp = X86::SHL32ri;

      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = X86::AND32ri8; break;
      case ISD::OR:  Op = X86::OR32ri8;  break;
      case ISD::XOR: Op = X86::XOR32ri8; break;
      }
      break;
    case MVT::i64:
      assert(CstVT == MVT::i8 || CstVT == MVT::i32);
      ShlOp = X86::SHL64ri;

      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = CstVT==MVT::i8 ? X86::AND64ri8 : X86::AND64ri32; break;
      case ISD::OR:  Op = CstVT==MVT::i8 ? X86::OR64ri8  : X86::OR64ri32;  break;
      case ISD::XOR: Op = CstVT==MVT::i8 ? X86::XOR64ri8 : X86::XOR64ri32; break;
      }
      break;
    }

    // Emit the smaller op and the shift.
    SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
    SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0), NewCst);
    return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
                                getI8Imm(ShlVal));
  }
  case X86ISD::UMUL8:
  case X86ISD::SMUL8: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    Opc = (Opcode == X86ISD::SMUL8 ? X86::IMUL8r : X86::MUL8r);

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::AL,
                                          N0, SDValue()).getValue(1);

    SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32);
    SDValue Ops[] = {N1, InFlag};
    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);

    ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
    return nullptr;
  }

  case X86ISD::UMUL: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned LoReg;
    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  Opc = X86::MUL8r;  break;
    case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
    case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
    case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
    SDValue Ops[] = {N1, InFlag};
    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);

    ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
    return nullptr;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    bool hasBMI2 = Subtarget->hasBMI2();
    if (!isSigned) {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r;
                     MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break;
      case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r;
                     MOpc = hasBMI2 ? X86::MULX64rm : X86::MUL64m; break;
      }
    } else {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned SrcReg, LoReg, HiReg;
    switch (Opc) {
    default: llvm_unreachable("Unknown MUL opcode!");
    case X86::IMUL8r:
    case X86::MUL8r:
      SrcReg = LoReg = X86::AL; HiReg = X86::AH;
      break;
    case X86::IMUL16r:
    case X86::MUL16r:
      SrcReg = LoReg = X86::AX; HiReg = X86::DX;
      break;
    case X86::IMUL32r:
    case X86::MUL32r:
      SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
      break;
    case X86::IMUL64r:
    case X86::MUL64r:
      SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
      break;
    case X86::MULX32rr:
      SrcReg = X86::EDX; LoReg = HiReg = 0;
      break;
    case X86::MULX64rr:
      SrcReg = X86::RDX; LoReg = HiReg = 0;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
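    // If the load did not fold on the RHS, try the LHS and swap the operands
    // so that the folded load always ends up in N1.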
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
                                          N0, SDValue()).getValue(1);
    SDValue ResHi, ResLo;

    if (foldedLoad) {
      SDValue Chain;
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
        SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
        ResHi = SDValue(CNode, 0);
        ResLo = SDValue(CNode, 1);
        Chain = SDValue(CNode, 2);
        InFlag = SDValue(CNode, 3);
      } else {
        SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
        Chain = SDValue(CNode, 0);
        InFlag = SDValue(CNode, 1);
      }

      // Update the chain.
      ReplaceUses(N1.getValue(1), Chain);
    } else {
      SDValue Ops[] = { N1, InFlag };
      if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
        SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
        ResHi = SDValue(CNode, 0);
        ResLo = SDValue(CNode, 1);
        InFlag = SDValue(CNode, 2);
      } else {
        SDVTList VTs = CurDAG->getVTList(MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
        InFlag = SDValue(CNode, 0);
      }
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);
      // Get the low part if needed. Don't use getCopyFromReg for aliasing
      // registers. Note that the low half is value 0 of the node; replacing
      // value 1 here would hand the high-half users the low byte instead.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX down 8 bits.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                              CurDAG->getTargetConstant(8,
                                                                        MVT::i8)),
                       0);
      // Then truncate it down to i8.
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the low half of the result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      if (!ResLo.getNode()) {
        assert(LoReg && "Register for low half is not defined!");
        ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT,
                                       InFlag);
        InFlag = ResLo.getValue(2);
      }
      ReplaceUses(SDValue(Node, 0), ResLo);
      DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the high half of the result, if it is needed.
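    // For MULX the high half was already produced as ResHi above; otherwise
    // it is read from HiReg (DX/EDX/RDX, or AH for the i8 case).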
    if (!SDValue(Node, 1).use_empty()) {
      if (!ResHi.getNode()) {
        assert(HiReg && "Register for high half is not defined!");
        ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT,
                                       InFlag);
        InFlag = ResHi.getValue(2);
      }
      ReplaceUses(SDValue(Node, 1), ResHi);
      DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
    }

    return nullptr;
  }

  case ISD::SDIVREM:
  case ISD::UDIVREM:
  case X86ISD::SDIVREM8_SEXT_HREG:
  case X86ISD::UDIVREM8_ZEXT_HREG: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = (Opcode == ISD::SDIVREM ||
                     Opcode == X86ISD::SDIVREM8_SEXT_HREG);
    if (!isSigned) {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

    unsigned LoReg, HiReg, ClrReg;
    unsigned SExtOpcode;
    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL; ClrReg = HiReg = X86::AH;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX; HiReg = X86::DX;
      ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      SExtOpcode = X86::CQO;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8: just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
                                         MVT::Other, Ops), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0), 0);
        Chain = CurDAG->getEntryNode();
      }
      Chain = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag), 0);
      } else {
        // Zero out the high part, effectively zero extending the input.
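        // MOV32r0 always defines a full 32-bit zero. The switch below narrows
        // it with EXTRACT_SUBREG for i16 and widens it with SUBREG_TO_REG for
        // i64; writing a 32-bit register already clears the upper 32 bits on
        // x86-64, so no extra instruction is needed for the widening.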
        SDValue ClrNode =
            SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, NVT), 0);
        switch (NVT.SimpleTy) {
        case MVT::i16:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
                          CurDAG->getTargetConstant(X86::sub_16bit, MVT::i32)),
                      0);
          break;
        case MVT::i32:
          break;
        case MVT::i64:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
                          CurDAG->getTargetConstant(0, MVT::i64), ClrNode,
                          CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
                      0);
          break;
        default:
          llvm_unreachable("Unexpected division source");
        }

        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
    }

    // Prevent use of AH in a REX instruction by explicitly copying it to
    // an ABCD_L register.
    //
    // The current assumption of the register allocator is that isel
    // won't generate explicit references to the GR8_ABCD_H registers. If
    // the allocator and/or the backend get enhanced to be more robust in
    // that regard, this can be, and should be, removed.
    if (HiReg == X86::AH && !SDValue(Node, 1).use_empty()) {
      SDValue AHCopy = CurDAG->getRegister(X86::AH, MVT::i8);
      unsigned AHExtOpcode =
        isSigned ? X86::MOVSX32_NOREXrr8 : X86::MOVZX32_NOREXrr8;

      SDNode *RNode = CurDAG->getMachineNode(AHExtOpcode, dl, MVT::i32,
                                             MVT::Glue, AHCopy, InFlag);
      SDValue Result(RNode, 0);
      InFlag = SDValue(RNode, 1);

      if (Opcode == X86ISD::UDIVREM8_ZEXT_HREG ||
          Opcode == X86ISD::SDIVREM8_SEXT_HREG) {
        if (Node->getValueType(1) == MVT::i64) {
          // It's not possible to directly movsx AH to a 64-bit register,
          // because the latter needs the REX prefix, but the former can't
          // have it.
          assert(Opcode != X86ISD::SDIVREM8_SEXT_HREG &&
                 "Unexpected i64 sext of h-register");
          Result =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
                          CurDAG->getTargetConstant(0, MVT::i64), Result,
                          CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
                      0);
        }
      } else {
        Result =
            CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);
      }
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    return nullptr;
  }

  case X86ISD::CMP:
  case X86ISD::SUB: {
    // Sometimes a SUB is used to perform a comparison.
    if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0))
      // This node is not a CMP.
      break;
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
        HasNoSignedComparisonUses(Node)) {
      // Look for (X86cmp (truncate $op, i1), 0) and try to convert it to a
      // smaller encoding.
      if (Opcode == X86ISD::CMP && N0.getValueType() == MVT::i1 &&
          X86::isZeroNode(N1)) {
        SDValue Reg = N0.getOperand(0);
        SDValue Imm = CurDAG->getTargetConstant(1, MVT::i8);

        // Extract the 8-bit subregister if necessary.
        if (Reg.getScalarValueSizeInBits() > 8)
          Reg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Reg);
        // Emit a testb.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
                                                 Reg, Imm);
        ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
        return nullptr;
      }

      N0 = N0.getOperand(0);
    }
    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    // Look past the truncate if CMP is the only use of it.
    if ((N0.getNode()->getOpcode() == ISD::AND ||
         (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
        N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;

      // For example, convert "testl %eax, $8" to "testb %al, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
          (!(C->getZExtValue() & 0x80) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // On x86-32, only the ABCD registers have 8-bit subregisters.
        if (!Subtarget->is64Bit()) {
          const TargetRegisterClass *TRC;
          switch (N0.getSimpleValueType().SimpleTy) {
          case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
          case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
          SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
          Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                               Reg.getValueType(), Reg, RC), 0);
        }

        // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one; do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }

      // For example, convert "testl %eax, $2048" to "testb %ah, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        // Shift the immediate right by 8 bits.
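        // Testing against the h-register covers bits 8-15, so the mask $2048
        // (0x800) from the example above becomes $8 here.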
        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
                                                       MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Put the value in an ABCD register.
        const TargetRegisterClass *TRC;
        switch (N0.getSimpleValueType().SimpleTy) {
        case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
        case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
        case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
        default: llvm_unreachable("Unsupported TEST operand type!");
        }
        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
        Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                             Reg.getValueType(), Reg, RC), 0);

        // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
                                                        MVT::i8, Reg);

        // Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only
        // target GR8_NOREX registers, so make sure the register class is
        // forced.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl,
                                                 MVT::i32, Subreg, ShiftedImm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one; do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }

      // For example, convert "testl %eax, $32776" to "testw %ax, $32776".
      if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
          N0.getValueType() != MVT::i16 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one; do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }

      // For example, convert "testq %rax, $268468232" to
      // "testl %eax, $268468232".
      if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
          N0.getValueType() == MVT::i64 &&
          (!(C->getZExtValue() & 0x80000000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one; do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }
    }
    break;
  }
  case ISD::STORE: {
    // Change a chain of {load; incr or dec; store} of the same value into
    // a simple increment or decrement through memory of that value, if the
    // uses of the modified value and its address are suitable.
    //
    // The DEC64m tablegen pattern is currently not able to match the case
    // where the EFLAGS on the original DEC are used. (This also applies to
    // {INC,DEC}X{64,32,16,8}.)
    // We'll need to improve tablegen to allow flags to be transferred from a
    // node in the pattern to the result node, probably with a new keyword.
    // For example, we have this:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //                   [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //                    (implicit EFLAGS)]>;
    // but may need something like this:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //                   [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //                    (transferrable EFLAGS)]>;

    StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
    SDValue StoredVal = StoreNode->getOperand(1);
    unsigned Opc = StoredVal->getOpcode();

    LoadSDNode *LoadNode = nullptr;
    SDValue InputChain;
    if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
                             LoadNode, InputChain))
      break;

    SDValue Base, Scale, Index, Disp, Segment;
    if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
                    Base, Scale, Index, Disp, Segment))
      break;

    MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
    MemOp[0] = StoreNode->getMemOperand();
    MemOp[1] = LoadNode->getMemOperand();
    const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
    EVT LdVT = LoadNode->getMemoryVT();
    unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
    MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
                                                   SDLoc(Node),
                                                   MVT::i32, MVT::Other, Ops);
    Result->setMemRefs(MemOp, MemOp + 2);

    ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
    ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));

    return Result;
  }
  }

  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == nullptr || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << '\n');

  return ResNode;
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o': // offsetable        ??
  case 'v': // not offsetable    ??
  default: return true;
  case 'm': // memory
    if (!SelectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// createX86ISelDag - This pass converts a legalized DAG into an
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}