X86ISelDAGToDAG.cpp revision 4d3ace4da0a000428ad5baea72c82e585fcd531c
//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to an X86 dag.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValues instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    int JT;
    unsigned Align;    // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
        SymbolFlags(X86II::MO_NO_FLAG) {
    }

    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
    }

    bool hasBaseOrIndexReg() const {
      return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
    }

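    // A fully populated X86ISelAddressMode describes a memory operand of the
    // general x86 form Segment:[Base_Reg + Scale*IndexReg + Disp], where the
    // displacement may also carry a symbolic piece (GV/CP/ES/JT/BlockAddr).
    // For example (illustrative only), matching the address (p + 4*i + 20)
    // would leave Base_Reg = p, Scale = 4, IndexReg = i, and Disp = 20.
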
    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode() != 0)
        Base_Reg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
             << " Scale" << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode() != 0)
        IndexReg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Align << '\n';
    }
#endif
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel : public SelectionDAGISel {
    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    const X86TargetLowering &X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        X86Lowering(*tm.getTargetLowering()),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    virtual void EmitFunctionEntryCode();

    virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;

    virtual void PreprocessISelDAG();

    inline bool immSext8(SDNode *N) const {
      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    }

    // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
    // sign extended field.
    inline bool i64immSExt32(SDNode *N) const {
      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
      return (int64_t)v == (int32_t)v;
    }

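    // For instance (illustrative): 0xFFFFFFFF80000000 sign-extends from its
    // low 32 bits (both casts compare equal as -2147483648), so i64immSExt32
    // accepts it, while 0x100000000 is rejected.
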
185#include "X86GenDAGISel.inc" 186 187 private: 188 SDNode *Select(SDNode *N); 189 SDNode *SelectGather(SDNode *N, unsigned Opc); 190 SDNode *SelectAtomic64(SDNode *Node, unsigned Opc); 191 SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT); 192 193 bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM); 194 bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM); 195 bool MatchWrapper(SDValue N, X86ISelAddressMode &AM); 196 bool MatchAddress(SDValue N, X86ISelAddressMode &AM); 197 bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM, 198 unsigned Depth); 199 bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM); 200 bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base, 201 SDValue &Scale, SDValue &Index, SDValue &Disp, 202 SDValue &Segment); 203 bool SelectMOV64Imm32(SDValue N, SDValue &Imm); 204 bool SelectLEAAddr(SDValue N, SDValue &Base, 205 SDValue &Scale, SDValue &Index, SDValue &Disp, 206 SDValue &Segment); 207 bool SelectLEA64_32Addr(SDValue N, SDValue &Base, 208 SDValue &Scale, SDValue &Index, SDValue &Disp, 209 SDValue &Segment); 210 bool SelectTLSADDRAddr(SDValue N, SDValue &Base, 211 SDValue &Scale, SDValue &Index, SDValue &Disp, 212 SDValue &Segment); 213 bool SelectScalarSSELoad(SDNode *Root, SDValue N, 214 SDValue &Base, SDValue &Scale, 215 SDValue &Index, SDValue &Disp, 216 SDValue &Segment, 217 SDValue &NodeWithChain); 218 219 bool TryFoldLoad(SDNode *P, SDValue N, 220 SDValue &Base, SDValue &Scale, 221 SDValue &Index, SDValue &Disp, 222 SDValue &Segment); 223 224 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for 225 /// inline asm expressions. 226 virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op, 227 char ConstraintCode, 228 std::vector<SDValue> &OutOps); 229 230 void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI); 231 232 inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base, 233 SDValue &Scale, SDValue &Index, 234 SDValue &Disp, SDValue &Segment) { 235 Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ? 236 CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) : 237 AM.Base_Reg; 238 Scale = getI8Imm(AM.Scale); 239 Index = AM.IndexReg; 240 // These are 32-bit even in 64-bit mode since RIP relative offset 241 // is 32-bit. 242 if (AM.GV) 243 Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(), 244 MVT::i32, AM.Disp, 245 AM.SymbolFlags); 246 else if (AM.CP) 247 Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, 248 AM.Align, AM.Disp, AM.SymbolFlags); 249 else if (AM.ES) { 250 assert(!AM.Disp && "Non-zero displacement is ignored with ES."); 251 Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags); 252 } else if (AM.JT != -1) { 253 assert(!AM.Disp && "Non-zero displacement is ignored with JT."); 254 Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags); 255 } else if (AM.BlockAddr) 256 Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp, 257 AM.SymbolFlags); 258 else 259 Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32); 260 261 if (AM.Segment.getNode()) 262 Segment = AM.Segment; 263 else 264 Segment = CurDAG->getRegister(0, MVT::i32); 265 } 266 267 /// getI8Imm - Return a target constant with the specified value, of type 268 /// i8. 269 inline SDValue getI8Imm(unsigned Imm) { 270 return CurDAG->getTargetConstant(Imm, MVT::i8); 271 } 272 273 /// getI32Imm - Return a target constant with the specified value, of type 274 /// i32. 
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, cast
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() const {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, cast
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() const {
      return getTargetMachine().getInstrInfo();
    }
  };
}


bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::SUB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      //   movl 4(%esp), %eax
      //   addl $4, %eax
      // vs.
      //   movl $4, %eax
      //   addl 4(%esp), %eax
      // The former is 2 bytes shorter. In cases where the increment is 1, the
      // saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      //   movl    %gs:0, %eax
      //   leal    i@NTPOFF(%eax), %eax
      // instead of
      //   movl    $i@NTPOFF, %eax
      //   addl    %gs:0, %eax
      // If the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}

/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// the load's chain operand and move the load below the call's chain operand.
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, SDLoc(Load),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    Ops.push_back(OrigChain.getOperand(i));
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  unsigned NumOps = Call.getNode()->getNumOperands();
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = NumOps; i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], NumOps);
}

/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain; this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target doesn't favor register-indirect calls.
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->callRegIndirect()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          // Only do this if the load can be folded into TC_RETURN.
          (Subtarget->is64Bit() ||
           getTargetMachine().getRelocationModel() != Reloc::PIC_)))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to be store
    // and load to the stack.  This is a gross hack.  We would like to simply
    // mark these as being illegal, but when we do that, legalize produces
    // these when it expands calls, then expands these in the same legalize
    // pass.  We would like dag combine to be able to hack on these between
    // the call expansion and the node legalization.  As such this pass
    // basically does "really late" legalization of these inline with the
    // X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    EVT SrcVT = N->getOperand(0).getValueType();
    EVT DstVT = N->getValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore.  SSE can fold direct loads into
    // other operations.  Based on this, decide what we want to do.
    EVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    SDLoc dl(N);

    // FIXME: optimize the case where the src/dest is a load or store?
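    // The conversion is performed through memory: a truncating store of the
    // operand at MemVT into the stack slot, followed by an extending load at
    // DstVT. E.g. (illustrative) for an x87 f64 -> f32 FP_ROUND, MemVT is
    // f32, so the value is stored to MemTmp as 32 bits and reloaded as f32,
    // which performs the rounding.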
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, MachinePointerInfo(), MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(),
                                        MemVT, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created.  This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}


/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing()) {
    unsigned CallOp =
      Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
    BuildMI(BB, DebugLoc(),
            TII->get(CallOp)).addExternalSymbol("__main");
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}

bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}

bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
        Subtarget->isTargetLinux())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      }

  return true;
}

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode.  These wrap things that will resolve down into a
/// symbol reference.  If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses.  We check this before checking
  // direct folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    // Base and index reg must be 0 in order to use %rip as base.
    if (AM.hasBaseOrIndexReg())
      return true;
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.BlockAddr = BA->getBlockAddress();
      AM.SymbolFlags = BA->getTargetFlags();
      if (FoldOffsetIntoAddress(BA->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else
      llvm_unreachable("Unhandled symbol reference node.");

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

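  // If the RIP-relative branch above matched, the symbol is addressed as
  // e.g. (illustrative) g(%rip) for a small-code-model global g: Base_Reg is
  // %rip and the symbol plus any folded constant offset is carried in Disp.
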
  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -mcmodel=small mode.  In 64-bit
  // mode, this only applies to a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      M == CodeModel::Small || M == CodeModel::Kernel) {
    assert(N.getOpcode() != X86ISD::WrapperRIP &&
           "RIP-relative addressing already handled");
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      AM.BlockAddr = BA->getBlockAddress();
      AM.Disp += BA->getOffset();
      AM.SymbolFlags = BA->getTargetFlags();
    } else
      llvm_unreachable("Unhandled symbol reference node.");
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done.  This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0 &&
      AM.IndexReg.getNode() == 0 &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}

// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
    DAG.RepositionNode(Pos.getNode(), N.getNode());
    N.getNode()->setNodeId(Pos.getNode()->getNodeId());
  }
}

// Transform "(X >> (8-C1)) & C2" to "(X >> 8) & 0xff" if safe. This
// allows us to convert the shift and and into an h-register extract and
// a scaled index. Returns false if the simplification is performed.
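// For example (illustrative): "(x >> 5) & 0x7F8" has Mask == 0xff << 3, so
// it is rewritten to "((x >> 8) & 0xff)" used as the index register with
// Scale == 8, which maps onto an AH/BH/CH/DH extract plus a scaled index.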
static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  EVT VT = N.getValueType();
  SDLoc DL(N);
  SDValue Eight = DAG.getConstant(8, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, Eight);
  InsertDAGNode(DAG, N, Srl);
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, And);
  InsertDAGNode(DAG, N, ShlCount);
  InsertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}

// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // isel mechanism requires their node ids to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  EVT VT = N.getValueType();
  SDLoc DL(N);
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, NewAnd);
  InsertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}

// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %ecx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = countLeadingZeros(Mask);
  unsigned MaskTZ = countTrailingZeros(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that the mask is a contiguous run of bits.
  if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  MaskLZ -= (64 - X.getValueSizeInBits()) + ShiftAmt;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits =
      X.getValueSizeInBits() - X.getOperand(0).getValueSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(),
                                               MaskLZ);
  APInt KnownZero, KnownOne;
  DAG.ComputeMaskedBits(X, KnownZero, KnownOne);
  if (MaskedHighBits != KnownZero) return true;

  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  EVT VT = N.getValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
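    // Replacing the any-extend with a zero-extend is safe here: the KnownZero
    // check above already proved that every high bit the mask would clear is
    // zero, so zero-filling the extended bits preserves the value.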
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
    InsertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  SDLoc DL(N);
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewSRLAmt);
  InsertDAGNode(DAG, N, NewSRL);
  InsertDAGNode(DAG, N, NewSHLAmt);
  InsertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}

bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  SDLoc dl(N);
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it.  Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements.  It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!FoldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now.  However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
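        // E.g. (illustrative) for (x + 5) << 2 the matcher produces
        // IndexReg = x, Scale = 4, and folds 5 << 2 = 20 into Disp.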
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
          if (!FoldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;

  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getValueSizeInBits() > 64) break;

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it down.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now.  However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (FoldOffsetIntoAddress(Disp, AM))
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address (with the
    // index field unused), use -B as the index. This is a win if A has
    // multiple parts that can be folded into the address. Also, this saves
    // a mov if the base register has other uses, since it avoids a
    // two-address sub instruction; however, it costs an additional mov if
    // the index register has other uses.

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    InsertDAGNode(*CurDAG, N, Zero);
    InsertDAGNode(*CurDAG, N, Neg);
    return false;
  }

  case ISD::ADD: {
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;

    // Try again after commuting the operands.
    if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
        !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base_Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      N = Handle.getValue();
      AM.Base_Reg = N.getOperand(0);
      AM.IndexReg = N.getOperand(1);
      AM.Scale = 1;
      return false;
    }
    N = Handle.getValue();
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (CurDAG->isBaseWithConstantOffset(N)) {
      X86ISelAddressMode Backup = AM;
      ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));

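      // E.g. (illustrative) for ((x << 4) | 3), the low bits of the LHS are
      // known zero, so the OR is equivalent to an ADD and the constant 3 can
      // be folded straight into Disp.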
      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
          !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
        return false;
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
    SDValue X = Shift.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getValueSizeInBits() > 64) break;

    if (!isa<ConstantSDNode>(N.getOperand(1)))
      break;
    uint64_t Mask = N.getConstantOperandVal(1);

    // Try to fold the mask and shift into an extract and scale.
    if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to fold the mask and shift directly into the scale.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to swap the mask and shift to place shifts which can be done as
    // a scale on the outside of the mask.
    if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
      return false;
    break;
  }
  }

  return MatchAddressBase(N, AM);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}

/// SelectAddr - Returns true if it is able to pattern-match an addressing
/// mode. It returns the operands which make up the maximal addressing mode
/// it can match by reference.
///
/// Parent is the parent node of the addr operand that is being matched.  It
/// is always a load, store, atomic node, or null.  It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // This list of opcodes covers all the nodes that have an "addr:$ptr"
      // operand but are not a MemSDNode, and thus don't have proper
      // addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
      Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
      Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS.
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  }

  if (MatchAddress(N, AM))
    return false;

  EVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectScalarSSELoad - Match a scalar SSE load.  In particular, we want to
/// match a load whose top elements are either undef or zeros.  The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternChainNode: this is the matched node that has a chain input and
///   output.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        PatternNodeWithChain.hasOneUse() &&
        IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
        IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements.  This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse() &&
      IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
      IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
    // Okay, this is a zero extending load.  Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    PatternNodeWithChain = SDValue(LD, 0);
    return true;
  }
  return false;
}


bool X86DAGToDAGISel::SelectMOV64Imm32(SDValue N, SDValue &Imm) {
  if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    uint64_t ImmVal = CN->getZExtValue();
    if ((uint32_t)ImmVal != (uint64_t)ImmVal)
      return false;

    Imm = CurDAG->getTargetConstant(ImmVal, MVT::i64);
    return true;
  }

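  // The comparison above accepts exactly the values that fit in an unsigned
  // 32-bit immediate; e.g. (illustrative) 0xFFFFFFFF is accepted (movl
  // implicitly zero-extends into the 64-bit register), while 0x100000000 is
  // rejected.
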
  // In static codegen with small code model, we can get the address of a label
  // into a register with 'movl'. TableGen has already made sure we're looking
  // at a label of some kind.
  assert(N->getOpcode() == X86ISD::Wrapper &&
         "Unexpected node type for MOV32ri64");
  N = N.getOperand(0);

  if (N->getOpcode() != ISD::TargetConstantPool &&
      N->getOpcode() != ISD::TargetJumpTable &&
      N->getOpcode() != ISD::TargetGlobalAddress &&
      N->getOpcode() != ISD::TargetExternalSymbol &&
      N->getOpcode() != ISD::TargetBlockAddress)
    return false;

  Imm = N;
  return TM.getCodeModel() == CodeModel::Small;
}

bool X86DAGToDAGISel::SelectLEA64_32Addr(SDValue N, SDValue &Base,
                                         SDValue &Scale, SDValue &Index,
                                         SDValue &Disp, SDValue &Segment) {
  if (!SelectLEAAddr(N, Base, Scale, Index, Disp, Segment))
    return false;

  SDLoc DL(N);
  RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
  if (RN && RN->getReg() == 0)
    Base = CurDAG->getRegister(0, MVT::i64);
  else if (Base.getValueType() == MVT::i32) {
    // Base could already be %rip, particularly in the x32 ABI.
    Base = SDValue(CurDAG->getMachineNode(
                       TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
                       CurDAG->getTargetConstant(0, MVT::i64),
                       Base,
                       CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
                   0);
  }

  RN = dyn_cast<RegisterSDNode>(Index);
  if (RN && RN->getReg() == 0)
    Index = CurDAG->getRegister(0, MVT::i64);
  else {
    assert(Index.getValueType() == MVT::i32 &&
           "Expect to be extending 32-bit registers for use in LEA");
    Index = SDValue(CurDAG->getMachineNode(
                        TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
                        CurDAG->getTargetConstant(0, MVT::i64),
                        Index,
                        CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
                    0);
  }

  return true;
}

/// SelectLEAAddr - Calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost-effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert (T == AM.Segment);
  AM.Segment = Copy;

  EVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base_Reg.getNode())
      Complexity = 1;
    else
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or
  // with a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}


bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P, OptLevel))
    return false;

  return SelectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
1556/// 1557SDNode *X86DAGToDAGISel::getGlobalBaseReg() { 1558 unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF); 1559 return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode(); 1560} 1561 1562SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) { 1563 SDValue Chain = Node->getOperand(0); 1564 SDValue In1 = Node->getOperand(1); 1565 SDValue In2L = Node->getOperand(2); 1566 SDValue In2H = Node->getOperand(3); 1567 1568 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 1569 if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) 1570 return NULL; 1571 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 1572 MemOp[0] = cast<MemSDNode>(Node)->getMemOperand(); 1573 const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain}; 1574 SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node), 1575 MVT::i32, MVT::i32, MVT::Other, Ops); 1576 cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1); 1577 return ResNode; 1578} 1579 1580/// Atomic opcode table 1581/// 1582enum AtomicOpc { 1583 ADD, 1584 SUB, 1585 INC, 1586 DEC, 1587 OR, 1588 AND, 1589 XOR, 1590 AtomicOpcEnd 1591}; 1592 1593enum AtomicSz { 1594 ConstantI8, 1595 I8, 1596 SextConstantI16, 1597 ConstantI16, 1598 I16, 1599 SextConstantI32, 1600 ConstantI32, 1601 I32, 1602 SextConstantI64, 1603 ConstantI64, 1604 I64, 1605 AtomicSzEnd 1606}; 1607 1608static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = { 1609 { 1610 X86::LOCK_ADD8mi, 1611 X86::LOCK_ADD8mr, 1612 X86::LOCK_ADD16mi8, 1613 X86::LOCK_ADD16mi, 1614 X86::LOCK_ADD16mr, 1615 X86::LOCK_ADD32mi8, 1616 X86::LOCK_ADD32mi, 1617 X86::LOCK_ADD32mr, 1618 X86::LOCK_ADD64mi8, 1619 X86::LOCK_ADD64mi32, 1620 X86::LOCK_ADD64mr, 1621 }, 1622 { 1623 X86::LOCK_SUB8mi, 1624 X86::LOCK_SUB8mr, 1625 X86::LOCK_SUB16mi8, 1626 X86::LOCK_SUB16mi, 1627 X86::LOCK_SUB16mr, 1628 X86::LOCK_SUB32mi8, 1629 X86::LOCK_SUB32mi, 1630 X86::LOCK_SUB32mr, 1631 X86::LOCK_SUB64mi8, 1632 X86::LOCK_SUB64mi32, 1633 X86::LOCK_SUB64mr, 1634 }, 1635 { 1636 0, 1637 X86::LOCK_INC8m, 1638 0, 1639 0, 1640 X86::LOCK_INC16m, 1641 0, 1642 0, 1643 X86::LOCK_INC32m, 1644 0, 1645 0, 1646 X86::LOCK_INC64m, 1647 }, 1648 { 1649 0, 1650 X86::LOCK_DEC8m, 1651 0, 1652 0, 1653 X86::LOCK_DEC16m, 1654 0, 1655 0, 1656 X86::LOCK_DEC32m, 1657 0, 1658 0, 1659 X86::LOCK_DEC64m, 1660 }, 1661 { 1662 X86::LOCK_OR8mi, 1663 X86::LOCK_OR8mr, 1664 X86::LOCK_OR16mi8, 1665 X86::LOCK_OR16mi, 1666 X86::LOCK_OR16mr, 1667 X86::LOCK_OR32mi8, 1668 X86::LOCK_OR32mi, 1669 X86::LOCK_OR32mr, 1670 X86::LOCK_OR64mi8, 1671 X86::LOCK_OR64mi32, 1672 X86::LOCK_OR64mr, 1673 }, 1674 { 1675 X86::LOCK_AND8mi, 1676 X86::LOCK_AND8mr, 1677 X86::LOCK_AND16mi8, 1678 X86::LOCK_AND16mi, 1679 X86::LOCK_AND16mr, 1680 X86::LOCK_AND32mi8, 1681 X86::LOCK_AND32mi, 1682 X86::LOCK_AND32mr, 1683 X86::LOCK_AND64mi8, 1684 X86::LOCK_AND64mi32, 1685 X86::LOCK_AND64mr, 1686 }, 1687 { 1688 X86::LOCK_XOR8mi, 1689 X86::LOCK_XOR8mr, 1690 X86::LOCK_XOR16mi8, 1691 X86::LOCK_XOR16mi, 1692 X86::LOCK_XOR16mr, 1693 X86::LOCK_XOR32mi8, 1694 X86::LOCK_XOR32mi, 1695 X86::LOCK_XOR32mr, 1696 X86::LOCK_XOR64mi8, 1697 X86::LOCK_XOR64mi32, 1698 X86::LOCK_XOR64mr, 1699 } 1700}; 1701 1702// Return the target constant operand for atomic-load-op and do simple 1703// translations, such as from atomic-load-add to lock-sub. The return value is 1704// one of the following 3 cases: 1705// + target-constant, the operand could be supported as a target constant. 1706// + empty, the operand is not needed any more with the new op selected. 
// + non-empty, otherwise.
static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
                                                SDLoc dl,
                                                enum AtomicOpc &Op, EVT NVT,
                                                SDValue Val) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) {
    int64_t CNVal = CN->getSExtValue();
    // Quit if not 32-bit imm.
    if ((int32_t)CNVal != CNVal)
      return Val;
    // For atomic-load-add, we can do some optimizations.
    if (Op == ADD) {
      // Translate to INC/DEC if ADD by 1 or -1.
      if ((CNVal == 1) || (CNVal == -1)) {
        Op = (CNVal == 1) ? INC : DEC;
        // No more constant operand after being translated into INC/DEC.
        return SDValue();
      }
      // Translate to SUB if ADD by a negative value.
      if (CNVal < 0) {
        Op = SUB;
        CNVal = -CNVal;
      }
    }
    return CurDAG->getTargetConstant(CNVal, NVT);
  }

  // If the value operand has a single use, try to optimize it.
  if (Op == ADD && Val.hasOneUse()) {
    // Translate (atomic-load-add ptr (sub 0 x)) back to (lock-sub x).
    if (Val.getOpcode() == ISD::SUB && X86::isZeroNode(Val.getOperand(0))) {
      Op = SUB;
      return Val.getOperand(1);
    }
    // A special case for i16, which needs truncating since, in most cases,
    // it has been promoted to i32. We will translate
    // (atomic-load-add (truncate (sub 0 x))) to (lock-sub (EXTRACT_SUBREG x)).
    if (Val.getOpcode() == ISD::TRUNCATE && NVT == MVT::i16 &&
        Val.getOperand(0).getOpcode() == ISD::SUB &&
        X86::isZeroNode(Val.getOperand(0).getOperand(0))) {
      Op = SUB;
      Val = Val.getOperand(0);
      return CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl, NVT,
                                            Val.getOperand(1));
    }
  }

  return Val;
}

SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  SDLoc dl(Node);

  // Optimize common patterns for __sync_or_and_fetch and similar arith
  // operations where the result is not used. This allows us to use the "lock"
  // version of the arithmetic instruction.
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  // Determine which index into the table to use.
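  // For example, an ISD::ATOMIC_LOAD_ADD of an i32 constant that fits in a
  // sign-extended 8-bit immediate selects AtomicOpcTbl[ADD][SextConstantI32],
  // i.e. X86::LOCK_ADD32mi8.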
1774 enum AtomicOpc Op; 1775 switch (Node->getOpcode()) { 1776 default: 1777 return 0; 1778 case ISD::ATOMIC_LOAD_OR: 1779 Op = OR; 1780 break; 1781 case ISD::ATOMIC_LOAD_AND: 1782 Op = AND; 1783 break; 1784 case ISD::ATOMIC_LOAD_XOR: 1785 Op = XOR; 1786 break; 1787 case ISD::ATOMIC_LOAD_ADD: 1788 Op = ADD; 1789 break; 1790 } 1791 1792 Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val); 1793 bool isUnOp = !Val.getNode(); 1794 bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant); 1795 1796 unsigned Opc = 0; 1797 switch (NVT.getSimpleVT().SimpleTy) { 1798 default: return 0; 1799 case MVT::i8: 1800 if (isCN) 1801 Opc = AtomicOpcTbl[Op][ConstantI8]; 1802 else 1803 Opc = AtomicOpcTbl[Op][I8]; 1804 break; 1805 case MVT::i16: 1806 if (isCN) { 1807 if (immSext8(Val.getNode())) 1808 Opc = AtomicOpcTbl[Op][SextConstantI16]; 1809 else 1810 Opc = AtomicOpcTbl[Op][ConstantI16]; 1811 } else 1812 Opc = AtomicOpcTbl[Op][I16]; 1813 break; 1814 case MVT::i32: 1815 if (isCN) { 1816 if (immSext8(Val.getNode())) 1817 Opc = AtomicOpcTbl[Op][SextConstantI32]; 1818 else 1819 Opc = AtomicOpcTbl[Op][ConstantI32]; 1820 } else 1821 Opc = AtomicOpcTbl[Op][I32]; 1822 break; 1823 case MVT::i64: 1824 Opc = AtomicOpcTbl[Op][I64]; 1825 if (isCN) { 1826 if (immSext8(Val.getNode())) 1827 Opc = AtomicOpcTbl[Op][SextConstantI64]; 1828 else if (i64immSExt32(Val.getNode())) 1829 Opc = AtomicOpcTbl[Op][ConstantI64]; 1830 } 1831 break; 1832 } 1833 1834 assert(Opc != 0 && "Invalid arith lock transform!"); 1835 1836 SDValue Ret; 1837 SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, 1838 dl, NVT), 0); 1839 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 1840 MemOp[0] = cast<MemSDNode>(Node)->getMemOperand(); 1841 if (isUnOp) { 1842 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain }; 1843 Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0); 1844 } else { 1845 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain }; 1846 Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0); 1847 } 1848 cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1); 1849 SDValue RetVals[] = { Undef, Ret }; 1850 return CurDAG->getMergeValues(RetVals, 2, dl).getNode(); 1851} 1852 1853/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has 1854/// any uses which require the SF or OF bits to be accurate. 1855static bool HasNoSignedComparisonUses(SDNode *N) { 1856 // Examine each user of the node. 1857 for (SDNode::use_iterator UI = N->use_begin(), 1858 UE = N->use_end(); UI != UE; ++UI) { 1859 // Only examine CopyToReg uses. 1860 if (UI->getOpcode() != ISD::CopyToReg) 1861 return false; 1862 // Only examine CopyToReg uses that copy to EFLAGS. 1863 if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != 1864 X86::EFLAGS) 1865 return false; 1866 // Examine each user of the CopyToReg use. 1867 for (SDNode::use_iterator FlagUI = UI->use_begin(), 1868 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) { 1869 // Only examine the Flag result. 1870 if (FlagUI.getUse().getResNo() != 1) continue; 1871 // Anything unusual: assume conservatively. 1872 if (!FlagUI->isMachineOpcode()) return false; 1873 // Examine the opcode of the user. 1874 switch (FlagUI->getMachineOpcode()) { 1875 // These comparisons don't treat the most significant bit specially. 
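      // (e.g. SETA and JA read only CF and ZF, whereas SETS, SETL, JS or JL
      // would read SF and/or OF and therefore must not be listed here.)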
      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
      case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
      case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
      case X86::CMOVA16rr: case X86::CMOVA16rm:
      case X86::CMOVA32rr: case X86::CMOVA32rm:
      case X86::CMOVA64rr: case X86::CMOVA64rm:
      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
      case X86::CMOVB16rr: case X86::CMOVB16rm:
      case X86::CMOVB32rr: case X86::CMOVB32rm:
      case X86::CMOVB64rr: case X86::CMOVB64rm:
      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
      case X86::CMOVE16rr: case X86::CMOVE16rm:
      case X86::CMOVE32rr: case X86::CMOVE32rm:
      case X86::CMOVE64rr: case X86::CMOVE64rm:
      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
      case X86::CMOVP16rr: case X86::CMOVP16rm:
      case X86::CMOVP32rr: case X86::CMOVP32rm:
      case X86::CMOVP64rr: case X86::CMOVP64rm:
        continue;
      // Anything else: assume conservatively.
      default: return false;
      }
    }
  }
  return true;
}

/// isLoadIncOrDecStore - Check whether or not the chain ending in StoreNode
/// is suitable for the {load; increment or decrement; store} read-modify-write
/// transformation.
static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
                                SDValue StoredVal, SelectionDAG *CurDAG,
                                LoadSDNode* &LoadNode, SDValue &InputChain) {

  // Is the value stored the result of a DEC or INC?
  if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;

  // Is the stored value result 0 of the load?
  if (StoredVal.getResNo() != 0) return false;

  // Are there other uses of the loaded value than the inc or dec?
  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;

  // Is the store non-extending and non-indexed?
  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
    return false;

  SDValue Load = StoredVal->getOperand(0);
  // Is the stored value a non-extending and non-indexed load?
  if (!ISD::isNormalLoad(Load.getNode())) return false;

  // Return LoadNode by reference.
  LoadNode = cast<LoadSDNode>(Load);
  // Is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
  EVT LdVT = LoadNode->getMemoryVT();
  if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
      LdVT != MVT::i8)
    return false;

  // Is the store the only read of the loaded value?
  if (!Load.hasOneUse())
    return false;

  // Is the address of the store the same as the load?
  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
      LoadNode->getOffset() != StoreNode->getOffset())
    return false;

  // Check if the chain is produced by the load or is a TokenFactor with
  // the load output chain as an operand.
Return InputChain by reference. 1958 SDValue Chain = StoreNode->getChain(); 1959 1960 bool ChainCheck = false; 1961 if (Chain == Load.getValue(1)) { 1962 ChainCheck = true; 1963 InputChain = LoadNode->getChain(); 1964 } else if (Chain.getOpcode() == ISD::TokenFactor) { 1965 SmallVector<SDValue, 4> ChainOps; 1966 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) { 1967 SDValue Op = Chain.getOperand(i); 1968 if (Op == Load.getValue(1)) { 1969 ChainCheck = true; 1970 continue; 1971 } 1972 1973 // Make sure using Op as part of the chain would not cause a cycle here. 1974 // In theory, we could check whether the chain node is a predecessor of 1975 // the load. But that can be very expensive. Instead visit the uses and 1976 // make sure they all have smaller node id than the load. 1977 int LoadId = LoadNode->getNodeId(); 1978 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 1979 UE = UI->use_end(); UI != UE; ++UI) { 1980 if (UI.getUse().getResNo() != 0) 1981 continue; 1982 if (UI->getNodeId() > LoadId) 1983 return false; 1984 } 1985 1986 ChainOps.push_back(Op); 1987 } 1988 1989 if (ChainCheck) 1990 // Make a new TokenFactor with all the other input chains except 1991 // for the load. 1992 InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), 1993 MVT::Other, &ChainOps[0], ChainOps.size()); 1994 } 1995 if (!ChainCheck) 1996 return false; 1997 1998 return true; 1999} 2000 2001/// getFusedLdStOpcode - Get the appropriate X86 opcode for an in memory 2002/// increment or decrement. Opc should be X86ISD::DEC or X86ISD::INC. 2003static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) { 2004 if (Opc == X86ISD::DEC) { 2005 if (LdVT == MVT::i64) return X86::DEC64m; 2006 if (LdVT == MVT::i32) return X86::DEC32m; 2007 if (LdVT == MVT::i16) return X86::DEC16m; 2008 if (LdVT == MVT::i8) return X86::DEC8m; 2009 } else { 2010 assert(Opc == X86ISD::INC && "unrecognized opcode"); 2011 if (LdVT == MVT::i64) return X86::INC64m; 2012 if (LdVT == MVT::i32) return X86::INC32m; 2013 if (LdVT == MVT::i16) return X86::INC16m; 2014 if (LdVT == MVT::i8) return X86::INC8m; 2015 } 2016 llvm_unreachable("unrecognized size for LdVT"); 2017} 2018 2019/// SelectGather - Customized ISel for GATHER operations. 2020/// 2021SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) { 2022 // Operands of Gather: VSrc, Base, VIdx, VMask, Scale 2023 SDValue Chain = Node->getOperand(0); 2024 SDValue VSrc = Node->getOperand(2); 2025 SDValue Base = Node->getOperand(3); 2026 SDValue VIdx = Node->getOperand(4); 2027 SDValue VMask = Node->getOperand(5); 2028 ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6)); 2029 if (!Scale) 2030 return 0; 2031 2032 SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(), 2033 MVT::Other); 2034 2035 // Memory Operands: Base, Scale, Index, Disp, Segment 2036 SDValue Disp = CurDAG->getTargetConstant(0, MVT::i32); 2037 SDValue Segment = CurDAG->getRegister(0, MVT::i32); 2038 const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx, 2039 Disp, Segment, VMask, Chain}; 2040 SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node), VTs, Ops); 2041 // Node has 2 outputs: VDst and MVT::Other. 2042 // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other. 2043 // We replace VDst of Node with VDst of ResNode, and Other of Node with Other 2044 // of ResNode. 
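  // That is, (Node, 0) is rewritten to (ResNode, 0) and (Node, 1) to
  // (ResNode, 2); the mask write-back, result 1 of ResNode, is intentionally
  // left without uses.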
2045 ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0)); 2046 ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2)); 2047 return ResNode; 2048} 2049 2050SDNode *X86DAGToDAGISel::Select(SDNode *Node) { 2051 EVT NVT = Node->getValueType(0); 2052 unsigned Opc, MOpc; 2053 unsigned Opcode = Node->getOpcode(); 2054 SDLoc dl(Node); 2055 2056 DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n'); 2057 2058 if (Node->isMachineOpcode()) { 2059 DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n'); 2060 return NULL; // Already selected. 2061 } 2062 2063 switch (Opcode) { 2064 default: break; 2065 case ISD::INTRINSIC_W_CHAIN: { 2066 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue(); 2067 switch (IntNo) { 2068 default: break; 2069 case Intrinsic::x86_avx2_gather_d_pd: 2070 case Intrinsic::x86_avx2_gather_d_pd_256: 2071 case Intrinsic::x86_avx2_gather_q_pd: 2072 case Intrinsic::x86_avx2_gather_q_pd_256: 2073 case Intrinsic::x86_avx2_gather_d_ps: 2074 case Intrinsic::x86_avx2_gather_d_ps_256: 2075 case Intrinsic::x86_avx2_gather_q_ps: 2076 case Intrinsic::x86_avx2_gather_q_ps_256: 2077 case Intrinsic::x86_avx2_gather_d_q: 2078 case Intrinsic::x86_avx2_gather_d_q_256: 2079 case Intrinsic::x86_avx2_gather_q_q: 2080 case Intrinsic::x86_avx2_gather_q_q_256: 2081 case Intrinsic::x86_avx2_gather_d_d: 2082 case Intrinsic::x86_avx2_gather_d_d_256: 2083 case Intrinsic::x86_avx2_gather_q_d: 2084 case Intrinsic::x86_avx2_gather_q_d_256: { 2085 unsigned Opc; 2086 switch (IntNo) { 2087 default: llvm_unreachable("Impossible intrinsic"); 2088 case Intrinsic::x86_avx2_gather_d_pd: Opc = X86::VGATHERDPDrm; break; 2089 case Intrinsic::x86_avx2_gather_d_pd_256: Opc = X86::VGATHERDPDYrm; break; 2090 case Intrinsic::x86_avx2_gather_q_pd: Opc = X86::VGATHERQPDrm; break; 2091 case Intrinsic::x86_avx2_gather_q_pd_256: Opc = X86::VGATHERQPDYrm; break; 2092 case Intrinsic::x86_avx2_gather_d_ps: Opc = X86::VGATHERDPSrm; break; 2093 case Intrinsic::x86_avx2_gather_d_ps_256: Opc = X86::VGATHERDPSYrm; break; 2094 case Intrinsic::x86_avx2_gather_q_ps: Opc = X86::VGATHERQPSrm; break; 2095 case Intrinsic::x86_avx2_gather_q_ps_256: Opc = X86::VGATHERQPSYrm; break; 2096 case Intrinsic::x86_avx2_gather_d_q: Opc = X86::VPGATHERDQrm; break; 2097 case Intrinsic::x86_avx2_gather_d_q_256: Opc = X86::VPGATHERDQYrm; break; 2098 case Intrinsic::x86_avx2_gather_q_q: Opc = X86::VPGATHERQQrm; break; 2099 case Intrinsic::x86_avx2_gather_q_q_256: Opc = X86::VPGATHERQQYrm; break; 2100 case Intrinsic::x86_avx2_gather_d_d: Opc = X86::VPGATHERDDrm; break; 2101 case Intrinsic::x86_avx2_gather_d_d_256: Opc = X86::VPGATHERDDYrm; break; 2102 case Intrinsic::x86_avx2_gather_q_d: Opc = X86::VPGATHERQDrm; break; 2103 case Intrinsic::x86_avx2_gather_q_d_256: Opc = X86::VPGATHERQDYrm; break; 2104 } 2105 SDNode *RetVal = SelectGather(Node, Opc); 2106 if (RetVal) 2107 // We already called ReplaceUses inside SelectGather. 
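        // Returning NULL signals that Node has been replaced already and no
        // further selection is required for it.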
2108 return NULL; 2109 break; 2110 } 2111 } 2112 break; 2113 } 2114 case X86ISD::GlobalBaseReg: 2115 return getGlobalBaseReg(); 2116 2117 2118 case X86ISD::ATOMOR64_DAG: 2119 case X86ISD::ATOMXOR64_DAG: 2120 case X86ISD::ATOMADD64_DAG: 2121 case X86ISD::ATOMSUB64_DAG: 2122 case X86ISD::ATOMNAND64_DAG: 2123 case X86ISD::ATOMAND64_DAG: 2124 case X86ISD::ATOMMAX64_DAG: 2125 case X86ISD::ATOMMIN64_DAG: 2126 case X86ISD::ATOMUMAX64_DAG: 2127 case X86ISD::ATOMUMIN64_DAG: 2128 case X86ISD::ATOMSWAP64_DAG: { 2129 unsigned Opc; 2130 switch (Opcode) { 2131 default: llvm_unreachable("Impossible opcode"); 2132 case X86ISD::ATOMOR64_DAG: Opc = X86::ATOMOR6432; break; 2133 case X86ISD::ATOMXOR64_DAG: Opc = X86::ATOMXOR6432; break; 2134 case X86ISD::ATOMADD64_DAG: Opc = X86::ATOMADD6432; break; 2135 case X86ISD::ATOMSUB64_DAG: Opc = X86::ATOMSUB6432; break; 2136 case X86ISD::ATOMNAND64_DAG: Opc = X86::ATOMNAND6432; break; 2137 case X86ISD::ATOMAND64_DAG: Opc = X86::ATOMAND6432; break; 2138 case X86ISD::ATOMMAX64_DAG: Opc = X86::ATOMMAX6432; break; 2139 case X86ISD::ATOMMIN64_DAG: Opc = X86::ATOMMIN6432; break; 2140 case X86ISD::ATOMUMAX64_DAG: Opc = X86::ATOMUMAX6432; break; 2141 case X86ISD::ATOMUMIN64_DAG: Opc = X86::ATOMUMIN6432; break; 2142 case X86ISD::ATOMSWAP64_DAG: Opc = X86::ATOMSWAP6432; break; 2143 } 2144 SDNode *RetVal = SelectAtomic64(Node, Opc); 2145 if (RetVal) 2146 return RetVal; 2147 break; 2148 } 2149 2150 case ISD::ATOMIC_LOAD_XOR: 2151 case ISD::ATOMIC_LOAD_AND: 2152 case ISD::ATOMIC_LOAD_OR: 2153 case ISD::ATOMIC_LOAD_ADD: { 2154 SDNode *RetVal = SelectAtomicLoadArith(Node, NVT); 2155 if (RetVal) 2156 return RetVal; 2157 break; 2158 } 2159 case ISD::AND: 2160 case ISD::OR: 2161 case ISD::XOR: { 2162 // For operations of the form (x << C1) op C2, check if we can use a smaller 2163 // encoding for C2 by transforming it into (x op (C2>>C1)) << C1. 2164 SDValue N0 = Node->getOperand(0); 2165 SDValue N1 = Node->getOperand(1); 2166 2167 if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse()) 2168 break; 2169 2170 // i8 is unshrinkable, i16 should be promoted to i32. 2171 if (NVT != MVT::i32 && NVT != MVT::i64) 2172 break; 2173 2174 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1); 2175 ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 2176 if (!Cst || !ShlCst) 2177 break; 2178 2179 int64_t Val = Cst->getSExtValue(); 2180 uint64_t ShlVal = ShlCst->getZExtValue(); 2181 2182 // Make sure that we don't change the operation by removing bits. 2183 // This only matters for OR and XOR, AND is unaffected. 2184 uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1; 2185 if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0) 2186 break; 2187 2188 unsigned ShlOp, Op; 2189 EVT CstVT = NVT; 2190 2191 // Check the minimum bitwidth for the new constant. 2192 // TODO: AND32ri is the same as AND64ri32 with zext imm. 2193 // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr 2194 // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32. 2195 if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal)) 2196 CstVT = MVT::i8; 2197 else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal)) 2198 CstVT = MVT::i32; 2199 2200 // Bail if there is no smaller encoding. 
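    // For example, (or (shl x, 8), 0x0F00) can become (shl (or x, 0x0F), 8):
    // the new constant 0x0F fits in an 8-bit immediate, while the original
    // 0x0F00 would need a 32-bit immediate field.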
2201 if (NVT == CstVT) 2202 break; 2203 2204 switch (NVT.getSimpleVT().SimpleTy) { 2205 default: llvm_unreachable("Unsupported VT!"); 2206 case MVT::i32: 2207 assert(CstVT == MVT::i8); 2208 ShlOp = X86::SHL32ri; 2209 2210 switch (Opcode) { 2211 default: llvm_unreachable("Impossible opcode"); 2212 case ISD::AND: Op = X86::AND32ri8; break; 2213 case ISD::OR: Op = X86::OR32ri8; break; 2214 case ISD::XOR: Op = X86::XOR32ri8; break; 2215 } 2216 break; 2217 case MVT::i64: 2218 assert(CstVT == MVT::i8 || CstVT == MVT::i32); 2219 ShlOp = X86::SHL64ri; 2220 2221 switch (Opcode) { 2222 default: llvm_unreachable("Impossible opcode"); 2223 case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break; 2224 case ISD::OR: Op = CstVT==MVT::i8? X86::OR64ri8 : X86::OR64ri32; break; 2225 case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break; 2226 } 2227 break; 2228 } 2229 2230 // Emit the smaller op and the shift. 2231 SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT); 2232 SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst); 2233 return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0), 2234 getI8Imm(ShlVal)); 2235 } 2236 case X86ISD::UMUL: { 2237 SDValue N0 = Node->getOperand(0); 2238 SDValue N1 = Node->getOperand(1); 2239 2240 unsigned LoReg; 2241 switch (NVT.getSimpleVT().SimpleTy) { 2242 default: llvm_unreachable("Unsupported VT!"); 2243 case MVT::i8: LoReg = X86::AL; Opc = X86::MUL8r; break; 2244 case MVT::i16: LoReg = X86::AX; Opc = X86::MUL16r; break; 2245 case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break; 2246 case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break; 2247 } 2248 2249 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg, 2250 N0, SDValue()).getValue(1); 2251 2252 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32); 2253 SDValue Ops[] = {N1, InFlag}; 2254 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops); 2255 2256 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0)); 2257 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1)); 2258 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2)); 2259 return NULL; 2260 } 2261 2262 case ISD::SMUL_LOHI: 2263 case ISD::UMUL_LOHI: { 2264 SDValue N0 = Node->getOperand(0); 2265 SDValue N1 = Node->getOperand(1); 2266 2267 bool isSigned = Opcode == ISD::SMUL_LOHI; 2268 bool hasBMI2 = Subtarget->hasBMI2(); 2269 if (!isSigned) { 2270 switch (NVT.getSimpleVT().SimpleTy) { 2271 default: llvm_unreachable("Unsupported VT!"); 2272 case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break; 2273 case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break; 2274 case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r; 2275 MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break; 2276 case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r; 2277 MOpc = hasBMI2 ? 
X86::MULX64rm : X86::MUL64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned SrcReg, LoReg, HiReg;
    switch (Opc) {
    default: llvm_unreachable("Unknown MUL opcode!");
    case X86::IMUL8r:
    case X86::MUL8r:
      SrcReg = LoReg = X86::AL; HiReg = X86::AH;
      break;
    case X86::IMUL16r:
    case X86::MUL16r:
      SrcReg = LoReg = X86::AX; HiReg = X86::DX;
      break;
    case X86::IMUL32r:
    case X86::MUL32r:
      SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
      break;
    case X86::IMUL64r:
    case X86::MUL64r:
      SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
      break;
    case X86::MULX32rr:
      SrcReg = X86::EDX; LoReg = HiReg = 0;
      break;
    case X86::MULX64rr:
      SrcReg = X86::RDX; LoReg = HiReg = 0;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
                                          N0, SDValue()).getValue(1);
    SDValue ResHi, ResLo;

    if (foldedLoad) {
      SDValue Chain;
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
        SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
        ResHi = SDValue(CNode, 0);
        ResLo = SDValue(CNode, 1);
        Chain = SDValue(CNode, 2);
        InFlag = SDValue(CNode, 3);
      } else {
        SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
        Chain = SDValue(CNode, 0);
        InFlag = SDValue(CNode, 1);
      }

      // Update the chain.
      ReplaceUses(N1.getValue(1), Chain);
    } else {
      SDValue Ops[] = { N1, InFlag };
      if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
        SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
        ResHi = SDValue(CNode, 0);
        ResLo = SDValue(CNode, 1);
        InFlag = SDValue(CNode, 2);
      } else {
        SDVTList VTs = CurDAG->getVTList(MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
        InFlag = SDValue(CNode, 0);
      }
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);
      // Get the low part if needed. Don't use getCopyFromReg for aliasing
      // registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX down 8 bits.
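      // (None of AH, BH, CH or DH can be encoded in an instruction that
      // carries a REX prefix, so read the full AX and shift instead.)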
2377 Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16, 2378 Result, 2379 CurDAG->getTargetConstant(8, MVT::i8)), 0); 2380 // Then truncate it down to i8. 2381 ReplaceUses(SDValue(Node, 1), 2382 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result)); 2383 } 2384 // Copy the low half of the result, if it is needed. 2385 if (!SDValue(Node, 0).use_empty()) { 2386 if (ResLo.getNode() == 0) { 2387 assert(LoReg && "Register for low half is not defined!"); 2388 ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT, 2389 InFlag); 2390 InFlag = ResLo.getValue(2); 2391 } 2392 ReplaceUses(SDValue(Node, 0), ResLo); 2393 DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n'); 2394 } 2395 // Copy the high half of the result, if it is needed. 2396 if (!SDValue(Node, 1).use_empty()) { 2397 if (ResHi.getNode() == 0) { 2398 assert(HiReg && "Register for high half is not defined!"); 2399 ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT, 2400 InFlag); 2401 InFlag = ResHi.getValue(2); 2402 } 2403 ReplaceUses(SDValue(Node, 1), ResHi); 2404 DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n'); 2405 } 2406 2407 return NULL; 2408 } 2409 2410 case ISD::SDIVREM: 2411 case ISD::UDIVREM: { 2412 SDValue N0 = Node->getOperand(0); 2413 SDValue N1 = Node->getOperand(1); 2414 2415 bool isSigned = Opcode == ISD::SDIVREM; 2416 if (!isSigned) { 2417 switch (NVT.getSimpleVT().SimpleTy) { 2418 default: llvm_unreachable("Unsupported VT!"); 2419 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break; 2420 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break; 2421 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break; 2422 case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break; 2423 } 2424 } else { 2425 switch (NVT.getSimpleVT().SimpleTy) { 2426 default: llvm_unreachable("Unsupported VT!"); 2427 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break; 2428 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break; 2429 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break; 2430 case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break; 2431 } 2432 } 2433 2434 unsigned LoReg, HiReg, ClrReg; 2435 unsigned SExtOpcode; 2436 switch (NVT.getSimpleVT().SimpleTy) { 2437 default: llvm_unreachable("Unsupported VT!"); 2438 case MVT::i8: 2439 LoReg = X86::AL; ClrReg = HiReg = X86::AH; 2440 SExtOpcode = X86::CBW; 2441 break; 2442 case MVT::i16: 2443 LoReg = X86::AX; HiReg = X86::DX; 2444 ClrReg = X86::DX; 2445 SExtOpcode = X86::CWD; 2446 break; 2447 case MVT::i32: 2448 LoReg = X86::EAX; ClrReg = HiReg = X86::EDX; 2449 SExtOpcode = X86::CDQ; 2450 break; 2451 case MVT::i64: 2452 LoReg = X86::RAX; ClrReg = HiReg = X86::RDX; 2453 SExtOpcode = X86::CQO; 2454 break; 2455 } 2456 2457 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 2458 bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4); 2459 bool signBitIsZero = CurDAG->SignBitIsZero(N0); 2460 2461 SDValue InFlag; 2462 if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) { 2463 // Special case for div8, just use a move with zero extension to AX to 2464 // clear the upper 8 bits (AH). 
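      // That is, emit "movzbl %src, %eax" before the 8-bit divide; AL then
      // receives the quotient and AH the remainder.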
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
                                         MVT::Other, Ops), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0), 0);
        Chain = CurDAG->getEntryNode();
      }
      Chain = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag), 0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode =
          SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, MVT::i32), 0);
        switch (NVT.getSimpleVT().SimpleTy) {
        case MVT::i16:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
                          CurDAG->getTargetConstant(X86::sub_16bit, MVT::i32)),
                      0);
          break;
        case MVT::i32:
          break;
        case MVT::i64:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
                          CurDAG->getTargetConstant(0, MVT::i64), ClrNode,
                          CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
                      0);
          break;
        default:
          llvm_unreachable("Unexpected division source");
        }

        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    // Shift it down 8 bits.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);

      // If we also need AL (the quotient), get it by extracting a subreg from
      // Result. The fast register allocator does not like multiple CopyFromReg
      // nodes using aliasing registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX right by 8 bits instead of using AH.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                              CurDAG->getTargetConstant(8, MVT::i8)),
                       0);
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the division (low) result, if it is needed.
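    // DIVREM nodes produce the quotient as result 0 and the remainder as
    // result 1, mirroring the LoReg/HiReg pair written by the hardware.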
2555 if (!SDValue(Node, 0).use_empty()) { 2556 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, 2557 LoReg, NVT, InFlag); 2558 InFlag = Result.getValue(2); 2559 ReplaceUses(SDValue(Node, 0), Result); 2560 DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n'); 2561 } 2562 // Copy the remainder (high) result, if it is needed. 2563 if (!SDValue(Node, 1).use_empty()) { 2564 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, 2565 HiReg, NVT, InFlag); 2566 InFlag = Result.getValue(2); 2567 ReplaceUses(SDValue(Node, 1), Result); 2568 DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n'); 2569 } 2570 return NULL; 2571 } 2572 2573 case X86ISD::CMP: 2574 case X86ISD::SUB: { 2575 // Sometimes a SUB is used to perform comparison. 2576 if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0)) 2577 // This node is not a CMP. 2578 break; 2579 SDValue N0 = Node->getOperand(0); 2580 SDValue N1 = Node->getOperand(1); 2581 2582 // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to 2583 // use a smaller encoding. 2584 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() && 2585 HasNoSignedComparisonUses(Node)) 2586 // Look past the truncate if CMP is the only use of it. 2587 N0 = N0.getOperand(0); 2588 if ((N0.getNode()->getOpcode() == ISD::AND || 2589 (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) && 2590 N0.getNode()->hasOneUse() && 2591 N0.getValueType() != MVT::i8 && 2592 X86::isZeroNode(N1)) { 2593 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1)); 2594 if (!C) break; 2595 2596 // For example, convert "testl %eax, $8" to "testb %al, $8" 2597 if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 && 2598 (!(C->getZExtValue() & 0x80) || 2599 HasNoSignedComparisonUses(Node))) { 2600 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8); 2601 SDValue Reg = N0.getNode()->getOperand(0); 2602 2603 // On x86-32, only the ABCD registers have 8-bit subregisters. 2604 if (!Subtarget->is64Bit()) { 2605 const TargetRegisterClass *TRC; 2606 switch (N0.getValueType().getSimpleVT().SimpleTy) { 2607 case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break; 2608 case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break; 2609 default: llvm_unreachable("Unsupported TEST operand type!"); 2610 } 2611 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32); 2612 Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl, 2613 Reg.getValueType(), Reg, RC), 0); 2614 } 2615 2616 // Extract the l-register. 2617 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, 2618 MVT::i8, Reg); 2619 2620 // Emit a testb. 2621 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, 2622 Subreg, Imm); 2623 // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has 2624 // one, do not call ReplaceAllUsesWith. 2625 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)), 2626 SDValue(NewNode, 0)); 2627 return NULL; 2628 } 2629 2630 // For example, "testl %eax, $2048" to "testb %ah, $8". 2631 if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 && 2632 (!(C->getZExtValue() & 0x8000) || 2633 HasNoSignedComparisonUses(Node))) { 2634 // Shift the immediate right by 8 bits. 2635 SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8, 2636 MVT::i8); 2637 SDValue Reg = N0.getNode()->getOperand(0); 2638 2639 // Put the value in an ABCD register. 
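      // Only the ABCD registers have h-register (high-byte) subregisters,
      // so the value must be constrained to GR16_ABCD/GR32_ABCD/GR64_ABCD
      // before sub_8bit_hi can be extracted.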
      const TargetRegisterClass *TRC;
      switch (N0.getValueType().getSimpleVT().SimpleTy) {
      case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
      case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
      case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
      default: llvm_unreachable("Unsupported TEST operand type!");
      }
      SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
      Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                           Reg.getValueType(), Reg, RC), 0);

      // Extract the h-register.
      SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
                                                      MVT::i8, Reg);

      // Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only
      // target GR8_NOREX registers, so make sure the register class is
      // forced.
      SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl,
                                               MVT::i32, Subreg, ShiftedImm);
      // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
      // one, do not call ReplaceAllUsesWith.
      ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                  SDValue(NewNode, 0));
      return NULL;
    }

    // For example, convert "testl %eax, $32776" to "testw %ax, $32776".
    if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
        N0.getValueType() != MVT::i16 &&
        (!(C->getZExtValue() & 0x8000) ||
         HasNoSignedComparisonUses(Node))) {
      SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
      SDValue Reg = N0.getNode()->getOperand(0);

      // Extract the 16-bit subregister.
      SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
                                                      MVT::i16, Reg);

      // Emit a testw.
      SDNode *NewNode = CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32,
                                               Subreg, Imm);
      // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
      // one, do not call ReplaceAllUsesWith.
      ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                  SDValue(NewNode, 0));
      return NULL;
    }

    // For example, convert "testq %rax, $268468232" to "testl %eax, $268468232".
    if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
        N0.getValueType() == MVT::i64 &&
        (!(C->getZExtValue() & 0x80000000) ||
         HasNoSignedComparisonUses(Node))) {
      SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
      SDValue Reg = N0.getNode()->getOperand(0);

      // Extract the 32-bit subregister.
      SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
                                                      MVT::i32, Reg);

      // Emit a testl.
      SDNode *NewNode = CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32,
                                               Subreg, Imm);
      // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
      // one, do not call ReplaceAllUsesWith.
      ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                  SDValue(NewNode, 0));
      return NULL;
    }
  }
  break;
  }
  case ISD::STORE: {
    // Change a chain of {load; incr or dec; store} of the same value into
    // a simple increment or decrement through memory of that value, if the
    // uses of the modified value and its address are suitable.
    // The DEC64m tablegen pattern is currently not able to match the case where
    // the EFLAGS on the original DEC are used. (This also applies to
    // {INC,DEC}X{64,32,16,8}.)
    // We'll need to improve tablegen to allow flags to be transferred from a
    // node in the pattern to the result node,
    // probably with a new keyword. For example, we currently have:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //                   [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //                    (implicit EFLAGS)]>;
    // but we may need something like:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //                   [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //                    (transferrable EFLAGS)]>;

    StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
    SDValue StoredVal = StoreNode->getOperand(1);
    unsigned Opc = StoredVal->getOpcode();

    LoadSDNode *LoadNode = 0;
    SDValue InputChain;
    if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
                             LoadNode, InputChain))
      break;

    SDValue Base, Scale, Index, Disp, Segment;
    if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
                    Base, Scale, Index, Disp, Segment))
      break;

    MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
    MemOp[0] = StoreNode->getMemOperand();
    MemOp[1] = LoadNode->getMemOperand();
    const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
    EVT LdVT = LoadNode->getMemoryVT();
    unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
    MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
                                                   SDLoc(Node),
                                                   MVT::i32, MVT::Other, Ops);
    Result->setMemRefs(MemOp, MemOp + 2);

    ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
    ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));

    return Result;
  }
  }

  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == NULL || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << '\n');

  return ResNode;
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o': // offsetable        ??
  case 'v': // not offsetable    ??
  default: return true;
  case 'm': // memory
    if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// createX86ISelDag - This pass converts a legalized DAG into an
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}
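// (The X86 target is expected to register this pass from its pass pipeline
// setup, e.g. by calling createX86ISelDag(TM, OptLevel) from the target's
// addInstSelector() hook and adding the result with addPass().)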