1//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines a DAG pattern matching instruction selector for X86,
11// converting from a legalized dag to a X86 dag.
12//
13//===----------------------------------------------------------------------===//
14
15#define DEBUG_TYPE "x86-isel"
16#include "X86.h"
17#include "X86InstrBuilder.h"
18#include "X86MachineFunctionInfo.h"
19#include "X86RegisterInfo.h"
20#include "X86Subtarget.h"
21#include "X86TargetMachine.h"
22#include "llvm/Instructions.h"
23#include "llvm/Intrinsics.h"
24#include "llvm/Type.h"
25#include "llvm/CodeGen/FunctionLoweringInfo.h"
26#include "llvm/CodeGen/MachineConstantPool.h"
27#include "llvm/CodeGen/MachineFunction.h"
28#include "llvm/CodeGen/MachineFrameInfo.h"
29#include "llvm/CodeGen/MachineInstrBuilder.h"
30#include "llvm/CodeGen/MachineRegisterInfo.h"
31#include "llvm/CodeGen/SelectionDAGISel.h"
32#include "llvm/Target/TargetMachine.h"
33#include "llvm/Target/TargetOptions.h"
34#include "llvm/Support/CFG.h"
35#include "llvm/Support/Debug.h"
36#include "llvm/Support/ErrorHandling.h"
37#include "llvm/Support/MathExtras.h"
38#include "llvm/Support/raw_ostream.h"
39#include "llvm/ADT/Statistic.h"
40using namespace llvm;
41
42STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
43
44//===----------------------------------------------------------------------===//
45//                      Pattern Matcher Implementation
46//===----------------------------------------------------------------------===//
47
48namespace {
49  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
50  /// SDValue's instead of register numbers for the leaves of the matched
51  /// tree.
52  struct X86ISelAddressMode {
53    enum {
54      RegBase,
55      FrameIndexBase
56    } BaseType;
57
58    // This is really a union, discriminated by BaseType!
59    SDValue Base_Reg;
60    int Base_FrameIndex;
61
62    unsigned Scale;
63    SDValue IndexReg;
64    int32_t Disp;
65    SDValue Segment;
66    const GlobalValue *GV;
67    const Constant *CP;
68    const BlockAddress *BlockAddr;
69    const char *ES;
70    int JT;
71    unsigned Align;    // CP alignment.
72    unsigned char SymbolFlags;  // X86II::MO_*
73
74    X86ISelAddressMode()
75      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
76        Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
77        SymbolFlags(X86II::MO_NO_FLAG) {
78    }
79
80    bool hasSymbolicDisplacement() const {
81      return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
82    }
83
84    bool hasBaseOrIndexReg() const {
85      return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
86    }
87
88    /// isRIPRelative - Return true if this addressing mode is already RIP
89    /// relative.
90    bool isRIPRelative() const {
91      if (BaseType != RegBase) return false;
92      if (RegisterSDNode *RegNode =
93            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
94        return RegNode->getReg() == X86::RIP;
95      return false;
96    }
97
98    void setBaseReg(SDValue Reg) {
99      BaseType = RegBase;
100      Base_Reg = Reg;
101    }
102
103#ifndef NDEBUG
104    void dump() {
105      dbgs() << "X86ISelAddressMode " << this << '\n';
106      dbgs() << "Base_Reg ";
107      if (Base_Reg.getNode() != 0)
108        Base_Reg.getNode()->dump();
109      else
110        dbgs() << "nul";
111      dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
112             << " Scale" << Scale << '\n'
113             << "IndexReg ";
114      if (IndexReg.getNode() != 0)
115        IndexReg.getNode()->dump();
116      else
117        dbgs() << "nul";
118      dbgs() << " Disp " << Disp << '\n'
119             << "GV ";
120      if (GV)
121        GV->dump();
122      else
123        dbgs() << "nul";
124      dbgs() << " CP ";
125      if (CP)
126        CP->dump();
127      else
128        dbgs() << "nul";
129      dbgs() << '\n'
130             << "ES ";
131      if (ES)
132        dbgs() << ES;
133      else
134        dbgs() << "nul";
135      dbgs() << " JT" << JT << " Align" << Align << '\n';
136    }
137#endif
138  };
139}
140
141namespace {
142  //===--------------------------------------------------------------------===//
143  /// ISel - X86 specific code to select X86 machine instructions for
144  /// SelectionDAG operations.
145  ///
146  class X86DAGToDAGISel : public SelectionDAGISel {
147    /// X86Lowering - This object fully describes how to lower LLVM code to an
148    /// X86-specific SelectionDAG.
149    const X86TargetLowering &X86Lowering;
150
151    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
152    /// make the right decision when generating code for different targets.
153    const X86Subtarget *Subtarget;
154
155    /// OptForSize - If true, selector should try to optimize for code size
156    /// instead of performance.
157    bool OptForSize;
158
159  public:
160    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
161      : SelectionDAGISel(tm, OptLevel),
162        X86Lowering(*tm.getTargetLowering()),
163        Subtarget(&tm.getSubtarget<X86Subtarget>()),
164        OptForSize(false) {}
165
166    virtual const char *getPassName() const {
167      return "X86 DAG->DAG Instruction Selection";
168    }
169
170    virtual void EmitFunctionEntryCode();
171
172    virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;
173
174    virtual void PreprocessISelDAG();
175
176    inline bool immSext8(SDNode *N) const {
177      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
178    }
179
180    // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
181    // sign extended field.
182    inline bool i64immSExt32(SDNode *N) const {
183      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
184      return (int64_t)v == (int32_t)v;
185    }
186
187// Include the pieces autogenerated from the target description.
188#include "X86GenDAGISel.inc"
189
190  private:
191    SDNode *Select(SDNode *N);
192    SDNode *SelectGather(SDNode *N, unsigned Opc);
193    SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
194    SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT);
195    SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT);
196
197    bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
198    bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
199    bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
200    bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
201    bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
202                                 unsigned Depth);
203    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
204    bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
205                    SDValue &Scale, SDValue &Index, SDValue &Disp,
206                    SDValue &Segment);
207    bool SelectLEAAddr(SDValue N, SDValue &Base,
208                       SDValue &Scale, SDValue &Index, SDValue &Disp,
209                       SDValue &Segment);
210    bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
211                           SDValue &Scale, SDValue &Index, SDValue &Disp,
212                           SDValue &Segment);
213    bool SelectScalarSSELoad(SDNode *Root, SDValue N,
214                             SDValue &Base, SDValue &Scale,
215                             SDValue &Index, SDValue &Disp,
216                             SDValue &Segment,
217                             SDValue &NodeWithChain);
218
219    bool TryFoldLoad(SDNode *P, SDValue N,
220                     SDValue &Base, SDValue &Scale,
221                     SDValue &Index, SDValue &Disp,
222                     SDValue &Segment);
223
224    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
225    /// inline asm expressions.
226    virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
227                                              char ConstraintCode,
228                                              std::vector<SDValue> &OutOps);
229
230    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
231
232    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
233                                   SDValue &Scale, SDValue &Index,
234                                   SDValue &Disp, SDValue &Segment) {
235      Base  = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
236        CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) :
237        AM.Base_Reg;
238      Scale = getI8Imm(AM.Scale);
239      Index = AM.IndexReg;
240      // These are 32-bit even in 64-bit mode since RIP relative offset
241      // is 32-bit.
242      if (AM.GV)
243        Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(),
244                                              MVT::i32, AM.Disp,
245                                              AM.SymbolFlags);
246      else if (AM.CP)
247        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
248                                             AM.Align, AM.Disp, AM.SymbolFlags);
249      else if (AM.ES)
250        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
251      else if (AM.JT != -1)
252        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
253      else if (AM.BlockAddr)
254        Disp = CurDAG->getBlockAddress(AM.BlockAddr, MVT::i32,
255                                       true, AM.SymbolFlags);
256      else
257        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);
258
259      if (AM.Segment.getNode())
260        Segment = AM.Segment;
261      else
262        Segment = CurDAG->getRegister(0, MVT::i32);
263    }
264
265    /// getI8Imm - Return a target constant with the specified value, of type
266    /// i8.
267    inline SDValue getI8Imm(unsigned Imm) {
268      return CurDAG->getTargetConstant(Imm, MVT::i8);
269    }
270
271    /// getI32Imm - Return a target constant with the specified value, of type
272    /// i32.
273    inline SDValue getI32Imm(unsigned Imm) {
274      return CurDAG->getTargetConstant(Imm, MVT::i32);
275    }
276
277    /// getGlobalBaseReg - Return an SDNode that returns the value of
278    /// the global base register. Output instructions required to
279    /// initialize the global base register, if necessary.
280    ///
281    SDNode *getGlobalBaseReg();
282
283    /// getTargetMachine - Return a reference to the TargetMachine, casted
284    /// to the target-specific type.
285    const X86TargetMachine &getTargetMachine() {
286      return static_cast<const X86TargetMachine &>(TM);
287    }
288
289    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
290    /// to the target-specific type.
291    const X86InstrInfo *getInstrInfo() {
292      return getTargetMachine().getInstrInfo();
293    }
294  };
295}
296
297
298bool
299X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
300  if (OptLevel == CodeGenOpt::None) return false;
301
302  if (!N.hasOneUse())
303    return false;
304
305  if (N.getOpcode() != ISD::LOAD)
306    return true;
307
308  // If N is a load, do additional profitability checks.
309  if (U == Root) {
310    switch (U->getOpcode()) {
311    default: break;
312    case X86ISD::ADD:
313    case X86ISD::SUB:
314    case X86ISD::AND:
315    case X86ISD::XOR:
316    case X86ISD::OR:
317    case ISD::ADD:
318    case ISD::ADDC:
319    case ISD::ADDE:
320    case ISD::AND:
321    case ISD::OR:
322    case ISD::XOR: {
323      SDValue Op1 = U->getOperand(1);
324
325      // If the other operand is a 8-bit immediate we should fold the immediate
326      // instead. This reduces code size.
327      // e.g.
328      // movl 4(%esp), %eax
329      // addl $4, %eax
330      // vs.
331      // movl $4, %eax
332      // addl 4(%esp), %eax
333      // The former is 2 bytes shorter. In case where the increment is 1, then
334      // the saving can be 4 bytes (by using incl %eax).
335      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
336        if (Imm->getAPIntValue().isSignedIntN(8))
337          return false;
338
339      // If the other operand is a TLS address, we should fold it instead.
340      // This produces
341      // movl    %gs:0, %eax
342      // leal    i@NTPOFF(%eax), %eax
343      // instead of
344      // movl    $i@NTPOFF, %eax
345      // addl    %gs:0, %eax
346      // if the block also has an access to a second TLS address this will save
347      // a load.
348      // FIXME: This is probably also true for non TLS addresses.
349      if (Op1.getOpcode() == X86ISD::Wrapper) {
350        SDValue Val = Op1.getOperand(0);
351        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
352          return false;
353      }
354    }
355    }
356  }
357
358  return true;
359}
360
361/// MoveBelowCallOrigChain - Replace the original chain operand of the call with
362/// load's chain operand and move load below the call's chain operand.
363static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
364                                  SDValue Call, SDValue OrigChain) {
365  SmallVector<SDValue, 8> Ops;
366  SDValue Chain = OrigChain.getOperand(0);
367  if (Chain.getNode() == Load.getNode())
368    Ops.push_back(Load.getOperand(0));
369  else {
370    assert(Chain.getOpcode() == ISD::TokenFactor &&
371           "Unexpected chain operand");
372    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
373      if (Chain.getOperand(i).getNode() == Load.getNode())
374        Ops.push_back(Load.getOperand(0));
375      else
376        Ops.push_back(Chain.getOperand(i));
377    SDValue NewChain =
378      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
379                      MVT::Other, &Ops[0], Ops.size());
380    Ops.clear();
381    Ops.push_back(NewChain);
382  }
383  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
384    Ops.push_back(OrigChain.getOperand(i));
385  CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
386  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
387                             Load.getOperand(1), Load.getOperand(2));
388  Ops.clear();
389  Ops.push_back(SDValue(Load.getNode(), 1));
390  for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
391    Ops.push_back(Call.getOperand(i));
392  CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], Ops.size());
393}
394
395/// isCalleeLoad - Return true if call address is a load and it can be
396/// moved below CALLSEQ_START and the chains leading up to the call.
397/// Return the CALLSEQ_START by reference as a second output.
398/// In the case of a tail call, there isn't a callseq node between the call
399/// chain and the load.
400static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
401  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
402    return false;
403  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
404  if (!LD ||
405      LD->isVolatile() ||
406      LD->getAddressingMode() != ISD::UNINDEXED ||
407      LD->getExtensionType() != ISD::NON_EXTLOAD)
408    return false;
409
410  // Now let's find the callseq_start.
411  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
412    if (!Chain.hasOneUse())
413      return false;
414    Chain = Chain.getOperand(0);
415  }
416
417  if (!Chain.getNumOperands())
418    return false;
419  if (Chain.getOperand(0).getNode() == Callee.getNode())
420    return true;
421  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
422      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
423      Callee.getValue(1).hasOneUse())
424    return true;
425  return false;
426}
427
428void X86DAGToDAGISel::PreprocessISelDAG() {
429  // OptForSize is used in pattern predicates that isel is matching.
430  OptForSize = MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize);
431
432  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
433       E = CurDAG->allnodes_end(); I != E; ) {
434    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.
435
436    if (OptLevel != CodeGenOpt::None &&
437        (N->getOpcode() == X86ISD::CALL ||
438         N->getOpcode() == X86ISD::TC_RETURN)) {
439      /// Also try moving call address load from outside callseq_start to just
440      /// before the call to allow it to be folded.
441      ///
442      ///     [Load chain]
443      ///         ^
444      ///         |
445      ///       [Load]
446      ///       ^    ^
447      ///       |    |
448      ///      /      \--
449      ///     /          |
450      ///[CALLSEQ_START] |
451      ///     ^          |
452      ///     |          |
453      /// [LOAD/C2Reg]   |
454      ///     |          |
455      ///      \        /
456      ///       \      /
457      ///       [CALL]
458      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
459      SDValue Chain = N->getOperand(0);
460      SDValue Load  = N->getOperand(1);
461      if (!isCalleeLoad(Load, Chain, HasCallSeq))
462        continue;
463      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
464      ++NumLoadMoved;
465      continue;
466    }
467
468    // Lower fpround and fpextend nodes that target the FP stack to be store and
469    // load to the stack.  This is a gross hack.  We would like to simply mark
470    // these as being illegal, but when we do that, legalize produces these when
471    // it expands calls, then expands these in the same legalize pass.  We would
472    // like dag combine to be able to hack on these between the call expansion
473    // and the node legalization.  As such this pass basically does "really
474    // late" legalization of these inline with the X86 isel pass.
475    // FIXME: This should only happen when not compiled with -O0.
476    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
477      continue;
478
479    EVT SrcVT = N->getOperand(0).getValueType();
480    EVT DstVT = N->getValueType(0);
481
482    // If any of the sources are vectors, no fp stack involved.
483    if (SrcVT.isVector() || DstVT.isVector())
484      continue;
485
486    // If the source and destination are SSE registers, then this is a legal
487    // conversion that should not be lowered.
488    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
489    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
490    if (SrcIsSSE && DstIsSSE)
491      continue;
492
493    if (!SrcIsSSE && !DstIsSSE) {
494      // If this is an FPStack extension, it is a noop.
495      if (N->getOpcode() == ISD::FP_EXTEND)
496        continue;
497      // If this is a value-preserving FPStack truncation, it is a noop.
498      if (N->getConstantOperandVal(1))
499        continue;
500    }
501
502    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
503    // FPStack has extload and truncstore.  SSE can fold direct loads into other
504    // operations.  Based on this, decide what we want to do.
505    EVT MemVT;
506    if (N->getOpcode() == ISD::FP_ROUND)
507      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
508    else
509      MemVT = SrcIsSSE ? SrcVT : DstVT;
510
511    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
512    DebugLoc dl = N->getDebugLoc();
513
514    // FIXME: optimize the case where the src/dest is a load or store?
515    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
516                                          N->getOperand(0),
517                                          MemTmp, MachinePointerInfo(), MemVT,
518                                          false, false, 0);
519    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
520                                        MachinePointerInfo(),
521                                        MemVT, false, false, 0);
522
523    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
524    // extload we created.  This will cause general havok on the dag because
525    // anything below the conversion could be folded into other existing nodes.
526    // To avoid invalidating 'I', back it up to the convert node.
527    --I;
528    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
529
530    // Now that we did that, the node is dead.  Increment the iterator to the
531    // next node to process, then delete N.
532    ++I;
533    CurDAG->DeleteNode(N);
534  }
535}
536
537
538/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
539/// the main function.
540void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
541                                             MachineFrameInfo *MFI) {
542  const TargetInstrInfo *TII = TM.getInstrInfo();
543  if (Subtarget->isTargetCygMing()) {
544    unsigned CallOp =
545      Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
546    BuildMI(BB, DebugLoc(),
547            TII->get(CallOp)).addExternalSymbol("__main");
548  }
549}
550
551void X86DAGToDAGISel::EmitFunctionEntryCode() {
552  // If this is main, emit special code for main.
553  if (const Function *Fn = MF->getFunction())
554    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
555      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
556}
557
558static bool isDispSafeForFrameIndex(int64_t Val) {
559  // On 64-bit platforms, we can run into an issue where a frame index
560  // includes a displacement that, when added to the explicit displacement,
561  // will overflow the displacement field. Assuming that the frame index
562  // displacement fits into a 31-bit integer  (which is only slightly more
563  // aggressive than the current fundamental assumption that it fits into
564  // a 32-bit integer), a 31-bit disp should always be safe.
565  return isInt<31>(Val);
566}
567
568bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
569                                            X86ISelAddressMode &AM) {
570  int64_t Val = AM.Disp + Offset;
571  CodeModel::Model M = TM.getCodeModel();
572  if (Subtarget->is64Bit()) {
573    if (!X86::isOffsetSuitableForCodeModel(Val, M,
574                                           AM.hasSymbolicDisplacement()))
575      return true;
576    // In addition to the checks required for a register base, check that
577    // we do not try to use an unsafe Disp with a frame index.
578    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
579        !isDispSafeForFrameIndex(Val))
580      return true;
581  }
582  AM.Disp = Val;
583  return false;
584
585}
586
587bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
588  SDValue Address = N->getOperand(1);
589
590  // load gs:0 -> GS segment register.
591  // load fs:0 -> FS segment register.
592  //
593  // This optimization is valid because the GNU TLS model defines that
594  // gs:0 (or fs:0 on X86-64) contains its own address.
595  // For more information see http://people.redhat.com/drepper/tls.pdf
596  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
597    if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
598        Subtarget->isTargetLinux())
599      switch (N->getPointerInfo().getAddrSpace()) {
600      case 256:
601        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
602        return false;
603      case 257:
604        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
605        return false;
606      }
607
608  return true;
609}
610
611/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
612/// into an addressing mode.  These wrap things that will resolve down into a
613/// symbol reference.  If no match is possible, this returns true, otherwise it
614/// returns false.
615bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
616  // If the addressing mode already has a symbol as the displacement, we can
617  // never match another symbol.
618  if (AM.hasSymbolicDisplacement())
619    return true;
620
621  SDValue N0 = N.getOperand(0);
622  CodeModel::Model M = TM.getCodeModel();
623
624  // Handle X86-64 rip-relative addresses.  We check this before checking direct
625  // folding because RIP is preferable to non-RIP accesses.
626  if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
627      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
628      // they cannot be folded into immediate fields.
629      // FIXME: This can be improved for kernel and other models?
630      (M == CodeModel::Small || M == CodeModel::Kernel)) {
631    // Base and index reg must be 0 in order to use %rip as base.
632    if (AM.hasBaseOrIndexReg())
633      return true;
634    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
635      X86ISelAddressMode Backup = AM;
636      AM.GV = G->getGlobal();
637      AM.SymbolFlags = G->getTargetFlags();
638      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
639        AM = Backup;
640        return true;
641      }
642    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
643      X86ISelAddressMode Backup = AM;
644      AM.CP = CP->getConstVal();
645      AM.Align = CP->getAlignment();
646      AM.SymbolFlags = CP->getTargetFlags();
647      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
648        AM = Backup;
649        return true;
650      }
651    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
652      AM.ES = S->getSymbol();
653      AM.SymbolFlags = S->getTargetFlags();
654    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
655      AM.JT = J->getIndex();
656      AM.SymbolFlags = J->getTargetFlags();
657    } else {
658      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
659      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
660    }
661
662    if (N.getOpcode() == X86ISD::WrapperRIP)
663      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
664    return false;
665  }
666
667  // Handle the case when globals fit in our immediate field: This is true for
668  // X86-32 always and X86-64 when in -mcmodel=small mode.  In 64-bit
669  // mode, this only applies to a non-RIP-relative computation.
670  if (!Subtarget->is64Bit() ||
671      M == CodeModel::Small || M == CodeModel::Kernel) {
672    assert(N.getOpcode() != X86ISD::WrapperRIP &&
673           "RIP-relative addressing already handled");
674    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
675      AM.GV = G->getGlobal();
676      AM.Disp += G->getOffset();
677      AM.SymbolFlags = G->getTargetFlags();
678    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
679      AM.CP = CP->getConstVal();
680      AM.Align = CP->getAlignment();
681      AM.Disp += CP->getOffset();
682      AM.SymbolFlags = CP->getTargetFlags();
683    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
684      AM.ES = S->getSymbol();
685      AM.SymbolFlags = S->getTargetFlags();
686    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
687      AM.JT = J->getIndex();
688      AM.SymbolFlags = J->getTargetFlags();
689    } else {
690      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
691      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
692    }
693    return false;
694  }
695
696  return true;
697}
698
699/// MatchAddress - Add the specified node to the specified addressing mode,
700/// returning true if it cannot be done.  This just pattern matches for the
701/// addressing mode.
702bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
703  if (MatchAddressRecursively(N, AM, 0))
704    return true;
705
706  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
707  // a smaller encoding and avoids a scaled-index.
708  if (AM.Scale == 2 &&
709      AM.BaseType == X86ISelAddressMode::RegBase &&
710      AM.Base_Reg.getNode() == 0) {
711    AM.Base_Reg = AM.IndexReg;
712    AM.Scale = 1;
713  }
714
715  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
716  // because it has a smaller encoding.
717  // TODO: Which other code models can use this?
718  if (TM.getCodeModel() == CodeModel::Small &&
719      Subtarget->is64Bit() &&
720      AM.Scale == 1 &&
721      AM.BaseType == X86ISelAddressMode::RegBase &&
722      AM.Base_Reg.getNode() == 0 &&
723      AM.IndexReg.getNode() == 0 &&
724      AM.SymbolFlags == X86II::MO_NO_FLAG &&
725      AM.hasSymbolicDisplacement())
726    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
727
728  return false;
729}
730
731// Insert a node into the DAG at least before the Pos node's position. This
732// will reposition the node as needed, and will assign it a node ID that is <=
733// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
734// IDs! The selection DAG must no longer depend on their uniqueness when this
735// is used.
736static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
737  if (N.getNode()->getNodeId() == -1 ||
738      N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
739    DAG.RepositionNode(Pos.getNode(), N.getNode());
740    N.getNode()->setNodeId(Pos.getNode()->getNodeId());
741  }
742}
743
744// Transform "(X >> (8-C1)) & C2" to "(X >> 8) & 0xff)" if safe. This
745// allows us to convert the shift and and into an h-register extract and
746// a scaled index. Returns false if the simplification is performed.
747static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
748                                      uint64_t Mask,
749                                      SDValue Shift, SDValue X,
750                                      X86ISelAddressMode &AM) {
751  if (Shift.getOpcode() != ISD::SRL ||
752      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
753      !Shift.hasOneUse())
754    return true;
755
756  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
757  if (ScaleLog <= 0 || ScaleLog >= 4 ||
758      Mask != (0xffu << ScaleLog))
759    return true;
760
761  EVT VT = N.getValueType();
762  DebugLoc DL = N.getDebugLoc();
763  SDValue Eight = DAG.getConstant(8, MVT::i8);
764  SDValue NewMask = DAG.getConstant(0xff, VT);
765  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
766  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
767  SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
768  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);
769
770  // Insert the new nodes into the topological ordering. We must do this in
771  // a valid topological ordering as nothing is going to go back and re-sort
772  // these nodes. We continually insert before 'N' in sequence as this is
773  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
774  // hierarchy left to express.
775  InsertDAGNode(DAG, N, Eight);
776  InsertDAGNode(DAG, N, Srl);
777  InsertDAGNode(DAG, N, NewMask);
778  InsertDAGNode(DAG, N, And);
779  InsertDAGNode(DAG, N, ShlCount);
780  InsertDAGNode(DAG, N, Shl);
781  DAG.ReplaceAllUsesWith(N, Shl);
782  AM.IndexReg = And;
783  AM.Scale = (1 << ScaleLog);
784  return false;
785}
786
787// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
788// allows us to fold the shift into this addressing mode. Returns false if the
789// transform succeeded.
790static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
791                                        uint64_t Mask,
792                                        SDValue Shift, SDValue X,
793                                        X86ISelAddressMode &AM) {
794  if (Shift.getOpcode() != ISD::SHL ||
795      !isa<ConstantSDNode>(Shift.getOperand(1)))
796    return true;
797
798  // Not likely to be profitable if either the AND or SHIFT node has more
799  // than one use (unless all uses are for address computation). Besides,
800  // isel mechanism requires their node ids to be reused.
801  if (!N.hasOneUse() || !Shift.hasOneUse())
802    return true;
803
804  // Verify that the shift amount is something we can fold.
805  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
806  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
807    return true;
808
809  EVT VT = N.getValueType();
810  DebugLoc DL = N.getDebugLoc();
811  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
812  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
813  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
814
815  // Insert the new nodes into the topological ordering. We must do this in
816  // a valid topological ordering as nothing is going to go back and re-sort
817  // these nodes. We continually insert before 'N' in sequence as this is
818  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
819  // hierarchy left to express.
820  InsertDAGNode(DAG, N, NewMask);
821  InsertDAGNode(DAG, N, NewAnd);
822  InsertDAGNode(DAG, N, NewShift);
823  DAG.ReplaceAllUsesWith(N, NewShift);
824
825  AM.Scale = 1 << ShiftAmt;
826  AM.IndexReg = NewAnd;
827  return false;
828}
829
830// Implement some heroics to detect shifts of masked values where the mask can
831// be replaced by extending the shift and undoing that in the addressing mode
832// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
833// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
834// the addressing mode. This results in code such as:
835//
836//   int f(short *y, int *lookup_table) {
837//     ...
838//     return *y + lookup_table[*y >> 11];
839//   }
840//
841// Turning into:
842//   movzwl (%rdi), %eax
843//   movl %eax, %ecx
844//   shrl $11, %ecx
845//   addl (%rsi,%rcx,4), %eax
846//
847// Instead of:
848//   movzwl (%rdi), %eax
849//   movl %eax, %ecx
850//   shrl $9, %ecx
851//   andl $124, %rcx
852//   addl (%rsi,%rcx), %eax
853//
854// Note that this function assumes the mask is provided as a mask *after* the
855// value is shifted. The input chain may or may not match that, but computing
856// such a mask is trivial.
857static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
858                                    uint64_t Mask,
859                                    SDValue Shift, SDValue X,
860                                    X86ISelAddressMode &AM) {
861  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
862      !isa<ConstantSDNode>(Shift.getOperand(1)))
863    return true;
864
865  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
866  unsigned MaskLZ = CountLeadingZeros_64(Mask);
867  unsigned MaskTZ = CountTrailingZeros_64(Mask);
868
869  // The amount of shift we're trying to fit into the addressing mode is taken
870  // from the trailing zeros of the mask.
871  unsigned AMShiftAmt = MaskTZ;
872
873  // There is nothing we can do here unless the mask is removing some bits.
874  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
875  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;
876
877  // We also need to ensure that mask is a continuous run of bits.
878  if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
879
880  // Scale the leading zero count down based on the actual size of the value.
881  // Also scale it down based on the size of the shift.
882  MaskLZ -= (64 - X.getValueSizeInBits()) + ShiftAmt;
883
884  // The final check is to ensure that any masked out high bits of X are
885  // already known to be zero. Otherwise, the mask has a semantic impact
886  // other than masking out a couple of low bits. Unfortunately, because of
887  // the mask, zero extensions will be removed from operands in some cases.
888  // This code works extra hard to look through extensions because we can
889  // replace them with zero extensions cheaply if necessary.
890  bool ReplacingAnyExtend = false;
891  if (X.getOpcode() == ISD::ANY_EXTEND) {
892    unsigned ExtendBits =
893      X.getValueSizeInBits() - X.getOperand(0).getValueSizeInBits();
894    // Assume that we'll replace the any-extend with a zero-extend, and
895    // narrow the search to the extended value.
896    X = X.getOperand(0);
897    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
898    ReplacingAnyExtend = true;
899  }
900  APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(),
901                                               MaskLZ);
902  APInt KnownZero, KnownOne;
903  DAG.ComputeMaskedBits(X, KnownZero, KnownOne);
904  if (MaskedHighBits != KnownZero) return true;
905
906  // We've identified a pattern that can be transformed into a single shift
907  // and an addressing mode. Make it so.
908  EVT VT = N.getValueType();
909  if (ReplacingAnyExtend) {
910    assert(X.getValueType() != VT);
911    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
912    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, X.getDebugLoc(), VT, X);
913    InsertDAGNode(DAG, N, NewX);
914    X = NewX;
915  }
916  DebugLoc DL = N.getDebugLoc();
917  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
918  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
919  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
920  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
921
922  // Insert the new nodes into the topological ordering. We must do this in
923  // a valid topological ordering as nothing is going to go back and re-sort
924  // these nodes. We continually insert before 'N' in sequence as this is
925  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
926  // hierarchy left to express.
927  InsertDAGNode(DAG, N, NewSRLAmt);
928  InsertDAGNode(DAG, N, NewSRL);
929  InsertDAGNode(DAG, N, NewSHLAmt);
930  InsertDAGNode(DAG, N, NewSHL);
931  DAG.ReplaceAllUsesWith(N, NewSHL);
932
933  AM.Scale = 1 << AMShiftAmt;
934  AM.IndexReg = NewSRL;
935  return false;
936}
937
938bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
939                                              unsigned Depth) {
940  DebugLoc dl = N.getDebugLoc();
941  DEBUG({
942      dbgs() << "MatchAddress: ";
943      AM.dump();
944    });
945  // Limit recursion.
946  if (Depth > 5)
947    return MatchAddressBase(N, AM);
948
949  // If this is already a %rip relative address, we can only merge immediates
950  // into it.  Instead of handling this in every case, we handle it here.
951  // RIP relative addressing: %rip + 32-bit displacement!
952  if (AM.isRIPRelative()) {
953    // FIXME: JumpTable and ExternalSymbol address currently don't like
954    // displacements.  It isn't very important, but this should be fixed for
955    // consistency.
956    if (!AM.ES && AM.JT != -1) return true;
957
958    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
959      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
960        return false;
961    return true;
962  }
963
964  switch (N.getOpcode()) {
965  default: break;
966  case ISD::Constant: {
967    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
968    if (!FoldOffsetIntoAddress(Val, AM))
969      return false;
970    break;
971  }
972
973  case X86ISD::Wrapper:
974  case X86ISD::WrapperRIP:
975    if (!MatchWrapper(N, AM))
976      return false;
977    break;
978
979  case ISD::LOAD:
980    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
981      return false;
982    break;
983
984  case ISD::FrameIndex:
985    if (AM.BaseType == X86ISelAddressMode::RegBase &&
986        AM.Base_Reg.getNode() == 0 &&
987        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
988      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
989      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
990      return false;
991    }
992    break;
993
994  case ISD::SHL:
995    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
996      break;
997
998    if (ConstantSDNode
999          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
1000      unsigned Val = CN->getZExtValue();
1001      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
1002      // that the base operand remains free for further matching. If
1003      // the base doesn't end up getting used, a post-processing step
1004      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
1005      if (Val == 1 || Val == 2 || Val == 3) {
1006        AM.Scale = 1 << Val;
1007        SDValue ShVal = N.getNode()->getOperand(0);
1008
1009        // Okay, we know that we have a scale by now.  However, if the scaled
1010        // value is an add of something and a constant, we can fold the
1011        // constant into the disp field here.
1012        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
1013          AM.IndexReg = ShVal.getNode()->getOperand(0);
1014          ConstantSDNode *AddVal =
1015            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
1016          uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
1017          if (!FoldOffsetIntoAddress(Disp, AM))
1018            return false;
1019        }
1020
1021        AM.IndexReg = ShVal;
1022        return false;
1023      }
1024    break;
1025    }
1026
1027  case ISD::SRL: {
1028    // Scale must not be used already.
1029    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
1030
1031    SDValue And = N.getOperand(0);
1032    if (And.getOpcode() != ISD::AND) break;
1033    SDValue X = And.getOperand(0);
1034
1035    // We only handle up to 64-bit values here as those are what matter for
1036    // addressing mode optimizations.
1037    if (X.getValueSizeInBits() > 64) break;
1038
1039    // The mask used for the transform is expected to be post-shift, but we
1040    // found the shift first so just apply the shift to the mask before passing
1041    // it down.
1042    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
1043        !isa<ConstantSDNode>(And.getOperand(1)))
1044      break;
1045    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);
1046
1047    // Try to fold the mask and shift into the scale, and return false if we
1048    // succeed.
1049    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
1050      return false;
1051    break;
1052  }
1053
1054  case ISD::SMUL_LOHI:
1055  case ISD::UMUL_LOHI:
1056    // A mul_lohi where we need the low part can be folded as a plain multiply.
1057    if (N.getResNo() != 0) break;
1058    // FALL THROUGH
1059  case ISD::MUL:
1060  case X86ISD::MUL_IMM:
1061    // X*[3,5,9] -> X+X*[2,4,8]
1062    if (AM.BaseType == X86ISelAddressMode::RegBase &&
1063        AM.Base_Reg.getNode() == 0 &&
1064        AM.IndexReg.getNode() == 0) {
1065      if (ConstantSDNode
1066            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
1067        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
1068            CN->getZExtValue() == 9) {
1069          AM.Scale = unsigned(CN->getZExtValue())-1;
1070
1071          SDValue MulVal = N.getNode()->getOperand(0);
1072          SDValue Reg;
1073
1074          // Okay, we know that we have a scale by now.  However, if the scaled
1075          // value is an add of something and a constant, we can fold the
1076          // constant into the disp field here.
1077          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
1078              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
1079            Reg = MulVal.getNode()->getOperand(0);
1080            ConstantSDNode *AddVal =
1081              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
1082            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
1083            if (FoldOffsetIntoAddress(Disp, AM))
1084              Reg = N.getNode()->getOperand(0);
1085          } else {
1086            Reg = N.getNode()->getOperand(0);
1087          }
1088
1089          AM.IndexReg = AM.Base_Reg = Reg;
1090          return false;
1091        }
1092    }
1093    break;
1094
1095  case ISD::SUB: {
1096    // Given A-B, if A can be completely folded into the address and
1097    // the index field with the index field unused, use -B as the index.
1098    // This is a win if a has multiple parts that can be folded into
1099    // the address. Also, this saves a mov if the base register has
1100    // other uses, since it avoids a two-address sub instruction, however
1101    // it costs an additional mov if the index register has other uses.
1102
1103    // Add an artificial use to this node so that we can keep track of
1104    // it if it gets CSE'd with a different node.
1105    HandleSDNode Handle(N);
1106
1107    // Test if the LHS of the sub can be folded.
1108    X86ISelAddressMode Backup = AM;
1109    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
1110      AM = Backup;
1111      break;
1112    }
1113    // Test if the index field is free for use.
1114    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
1115      AM = Backup;
1116      break;
1117    }
1118
1119    int Cost = 0;
1120    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
1121    // If the RHS involves a register with multiple uses, this
1122    // transformation incurs an extra mov, due to the neg instruction
1123    // clobbering its operand.
1124    if (!RHS.getNode()->hasOneUse() ||
1125        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
1126        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
1127        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
1128        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
1129         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
1130      ++Cost;
1131    // If the base is a register with multiple uses, this
1132    // transformation may save a mov.
1133    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
1134         AM.Base_Reg.getNode() &&
1135         !AM.Base_Reg.getNode()->hasOneUse()) ||
1136        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
1137      --Cost;
1138    // If the folded LHS was interesting, this transformation saves
1139    // address arithmetic.
1140    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
1141        ((AM.Disp != 0) && (Backup.Disp == 0)) +
1142        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
1143      --Cost;
1144    // If it doesn't look like it may be an overall win, don't do it.
1145    if (Cost >= 0) {
1146      AM = Backup;
1147      break;
1148    }
1149
1150    // Ok, the transformation is legal and appears profitable. Go for it.
1151    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
1152    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
1153    AM.IndexReg = Neg;
1154    AM.Scale = 1;
1155
1156    // Insert the new nodes into the topological ordering.
1157    InsertDAGNode(*CurDAG, N, Zero);
1158    InsertDAGNode(*CurDAG, N, Neg);
1159    return false;
1160  }
1161
1162  case ISD::ADD: {
1163    // Add an artificial use to this node so that we can keep track of
1164    // it if it gets CSE'd with a different node.
1165    HandleSDNode Handle(N);
1166
1167    X86ISelAddressMode Backup = AM;
1168    if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
1169        !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
1170      return false;
1171    AM = Backup;
1172
1173    // Try again after commuting the operands.
1174    if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
1175        !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
1176      return false;
1177    AM = Backup;
1178
1179    // If we couldn't fold both operands into the address at the same time,
1180    // see if we can just put each operand into a register and fold at least
1181    // the add.
1182    if (AM.BaseType == X86ISelAddressMode::RegBase &&
1183        !AM.Base_Reg.getNode() &&
1184        !AM.IndexReg.getNode()) {
1185      N = Handle.getValue();
1186      AM.Base_Reg = N.getOperand(0);
1187      AM.IndexReg = N.getOperand(1);
1188      AM.Scale = 1;
1189      return false;
1190    }
1191    N = Handle.getValue();
1192    break;
1193  }
1194
1195  case ISD::OR:
1196    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
1197    if (CurDAG->isBaseWithConstantOffset(N)) {
1198      X86ISelAddressMode Backup = AM;
1199      ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));
1200
1201      // Start with the LHS as an addr mode.
1202      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
1203          !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
1204        return false;
1205      AM = Backup;
1206    }
1207    break;
1208
1209  case ISD::AND: {
1210    // Perform some heroic transforms on an and of a constant-count shift
1211    // with a constant to enable use of the scaled offset field.
1212
1213    // Scale must not be used already.
1214    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
1215
1216    SDValue Shift = N.getOperand(0);
1217    if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
1218    SDValue X = Shift.getOperand(0);
1219
1220    // We only handle up to 64-bit values here as those are what matter for
1221    // addressing mode optimizations.
1222    if (X.getValueSizeInBits() > 64) break;
1223
1224    if (!isa<ConstantSDNode>(N.getOperand(1)))
1225      break;
1226    uint64_t Mask = N.getConstantOperandVal(1);
1227
1228    // Try to fold the mask and shift into an extract and scale.
1229    if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
1230      return false;
1231
1232    // Try to fold the mask and shift directly into the scale.
1233    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
1234      return false;
1235
1236    // Try to swap the mask and shift to place shifts which can be done as
1237    // a scale on the outside of the mask.
1238    if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
1239      return false;
1240    break;
1241  }
1242  }
1243
1244  return MatchAddressBase(N, AM);
1245}
1246
1247/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
1248/// specified addressing mode without any further recursion.
1249bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
1250  // Is the base register already occupied?
1251  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
1252    // If so, check to see if the scale index register is set.
1253    if (AM.IndexReg.getNode() == 0) {
1254      AM.IndexReg = N;
1255      AM.Scale = 1;
1256      return false;
1257    }
1258
1259    // Otherwise, we cannot select it.
1260    return true;
1261  }
1262
1263  // Default, generate it as a register.
1264  AM.BaseType = X86ISelAddressMode::RegBase;
1265  AM.Base_Reg = N;
1266  return false;
1267}
1268
1269/// SelectAddr - returns true if it is able pattern match an addressing mode.
1270/// It returns the operands which make up the maximal addressing mode it can
1271/// match by reference.
1272///
1273/// Parent is the parent node of the addr operand that is being matched.  It
1274/// is always a load, store, atomic node, or null.  It is only null when
1275/// checking memory operands for inline asm nodes.
1276bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
1277                                 SDValue &Scale, SDValue &Index,
1278                                 SDValue &Disp, SDValue &Segment) {
1279  X86ISelAddressMode AM;
1280
1281  if (Parent &&
1282      // This list of opcodes are all the nodes that have an "addr:$ptr" operand
1283      // that are not a MemSDNode, and thus don't have proper addrspace info.
1284      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
1285      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
1286      Parent->getOpcode() != X86ISD::TLSCALL) { // Fixme
1287    unsigned AddrSpace =
1288      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
1289    // AddrSpace 256 -> GS, 257 -> FS.
1290    if (AddrSpace == 256)
1291      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
1292    if (AddrSpace == 257)
1293      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
1294  }
1295
1296  if (MatchAddress(N, AM))
1297    return false;
1298
1299  EVT VT = N.getValueType();
1300  if (AM.BaseType == X86ISelAddressMode::RegBase) {
1301    if (!AM.Base_Reg.getNode())
1302      AM.Base_Reg = CurDAG->getRegister(0, VT);
1303  }
1304
1305  if (!AM.IndexReg.getNode())
1306    AM.IndexReg = CurDAG->getRegister(0, VT);
1307
1308  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
1309  return true;
1310}
1311
1312/// SelectScalarSSELoad - Match a scalar SSE load.  In particular, we want to
1313/// match a load whose top elements are either undef or zeros.  The load flavor
1314/// is derived from the type of N, which is either v4f32 or v2f64.
1315///
1316/// We also return:
1317///   PatternChainNode: this is the matched node that has a chain input and
1318///   output.
1319bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
1320                                          SDValue N, SDValue &Base,
1321                                          SDValue &Scale, SDValue &Index,
1322                                          SDValue &Disp, SDValue &Segment,
1323                                          SDValue &PatternNodeWithChain) {
1324  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
1325    PatternNodeWithChain = N.getOperand(0);
1326    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
1327        PatternNodeWithChain.hasOneUse() &&
1328        IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
1329        IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
1330      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
1331      if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
1332        return false;
1333      return true;
1334    }
1335  }
1336
1337  // Also handle the case where we explicitly require zeros in the top
1338  // elements.  This is a vector shuffle from the zero vector.
1339  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
1340      // Check to see if the top elements are all zeros (or bitcast of zeros).
1341      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
1342      N.getOperand(0).getNode()->hasOneUse() &&
1343      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
1344      N.getOperand(0).getOperand(0).hasOneUse() &&
1345      IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
1346      IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
1347    // Okay, this is a zero extending load.  Fold it.
1348    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
1349    if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
1350      return false;
1351    PatternNodeWithChain = SDValue(LD, 0);
1352    return true;
1353  }
1354  return false;
1355}
1356
1357
1358/// SelectLEAAddr - it calls SelectAddr and determines if the maximal addressing
1359/// mode it matches can be cost effectively emitted as an LEA instruction.
1360bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
1361                                    SDValue &Base, SDValue &Scale,
1362                                    SDValue &Index, SDValue &Disp,
1363                                    SDValue &Segment) {
1364  X86ISelAddressMode AM;
1365
1366  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
1367  // segments.
1368  SDValue Copy = AM.Segment;
1369  SDValue T = CurDAG->getRegister(0, MVT::i32);
1370  AM.Segment = T;
1371  if (MatchAddress(N, AM))
1372    return false;
1373  assert (T == AM.Segment);
1374  AM.Segment = Copy;
1375
1376  EVT VT = N.getValueType();
1377  unsigned Complexity = 0;
1378  if (AM.BaseType == X86ISelAddressMode::RegBase)
1379    if (AM.Base_Reg.getNode())
1380      Complexity = 1;
1381    else
1382      AM.Base_Reg = CurDAG->getRegister(0, VT);
1383  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
1384    Complexity = 4;
1385
1386  if (AM.IndexReg.getNode())
1387    Complexity++;
1388  else
1389    AM.IndexReg = CurDAG->getRegister(0, VT);
1390
1391  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
1392  // a simple shift.
1393  if (AM.Scale > 1)
1394    Complexity++;
1395
1396  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
1397  // to a LEA. This is determined with some expermentation but is by no means
1398  // optimal (especially for code size consideration). LEA is nice because of
1399  // its three-address nature. Tweak the cost function again when we can run
1400  // convertToThreeAddress() at register allocation time.
1401  if (AM.hasSymbolicDisplacement()) {
1402    // For X86-64, we should always use lea to materialize RIP relative
1403    // addresses.
1404    if (Subtarget->is64Bit())
1405      Complexity = 4;
1406    else
1407      Complexity += 2;
1408  }
1409
1410  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
1411    Complexity++;
1412
1413  // If it isn't worth using an LEA, reject it.
1414  if (Complexity <= 2)
1415    return false;
1416
1417  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
1418  return true;
1419}
1420
1421/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
1422bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
1423                                        SDValue &Scale, SDValue &Index,
1424                                        SDValue &Disp, SDValue &Segment) {
1425  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
1426  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
1427
1428  X86ISelAddressMode AM;
1429  AM.GV = GA->getGlobal();
1430  AM.Disp += GA->getOffset();
1431  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
1432  AM.SymbolFlags = GA->getTargetFlags();
1433
1434  if (N.getValueType() == MVT::i32) {
1435    AM.Scale = 1;
1436    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
1437  } else {
1438    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
1439  }
1440
1441  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
1442  return true;
1443}
1444
1445
1446bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
1447                                  SDValue &Base, SDValue &Scale,
1448                                  SDValue &Index, SDValue &Disp,
1449                                  SDValue &Segment) {
1450  if (!ISD::isNON_EXTLoad(N.getNode()) ||
1451      !IsProfitableToFold(N, P, P) ||
1452      !IsLegalToFold(N, P, P, OptLevel))
1453    return false;
1454
1455  return SelectAddr(N.getNode(),
1456                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
1457}
1458
1459/// getGlobalBaseReg - Return an SDNode that returns the value of
1460/// the global base register. Output instructions required to
1461/// initialize the global base register, if necessary.
1462///
1463SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
1464  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
1465  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
1466}
1467
1468SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
1469  SDValue Chain = Node->getOperand(0);
1470  SDValue In1 = Node->getOperand(1);
1471  SDValue In2L = Node->getOperand(2);
1472  SDValue In2H = Node->getOperand(3);
1473  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
1474  if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
1475    return NULL;
1476  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1477  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
1478  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
1479  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
1480                                           MVT::i32, MVT::i32, MVT::Other, Ops,
1481                                           array_lengthof(Ops));
1482  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
1483  return ResNode;
1484}
1485
1486// FIXME: Figure out some way to unify this with the 'or' and other code
1487// below.
1488SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
1489  if (Node->hasAnyUseOfValue(0))
1490    return 0;
1491
1492  // Optimize common patterns for __sync_add_and_fetch and
1493  // __sync_sub_and_fetch where the result is not used. This allows us
1494  // to use "lock" version of add, sub, inc, dec instructions.
1495  // FIXME: Do not use special instructions but instead add the "lock"
1496  // prefix to the target node somehow. The extra information will then be
1497  // transferred to machine instruction and it denotes the prefix.
1498  SDValue Chain = Node->getOperand(0);
1499  SDValue Ptr = Node->getOperand(1);
1500  SDValue Val = Node->getOperand(2);
1501  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
1502  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
1503    return 0;
1504
1505  bool isInc = false, isDec = false, isSub = false, isCN = false;
1506  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
1507  if (CN && CN->getSExtValue() == (int32_t)CN->getSExtValue()) {
1508    isCN = true;
1509    int64_t CNVal = CN->getSExtValue();
1510    if (CNVal == 1)
1511      isInc = true;
1512    else if (CNVal == -1)
1513      isDec = true;
1514    else if (CNVal >= 0)
1515      Val = CurDAG->getTargetConstant(CNVal, NVT);
1516    else {
1517      isSub = true;
1518      Val = CurDAG->getTargetConstant(-CNVal, NVT);
1519    }
1520  } else if (Val.hasOneUse() &&
1521             Val.getOpcode() == ISD::SUB &&
1522             X86::isZeroNode(Val.getOperand(0))) {
1523    isSub = true;
1524    Val = Val.getOperand(1);
1525  }
1526
1527  DebugLoc dl = Node->getDebugLoc();
1528  unsigned Opc = 0;
1529  switch (NVT.getSimpleVT().SimpleTy) {
1530  default: return 0;
1531  case MVT::i8:
1532    if (isInc)
1533      Opc = X86::LOCK_INC8m;
1534    else if (isDec)
1535      Opc = X86::LOCK_DEC8m;
1536    else if (isSub) {
1537      if (isCN)
1538        Opc = X86::LOCK_SUB8mi;
1539      else
1540        Opc = X86::LOCK_SUB8mr;
1541    } else {
1542      if (isCN)
1543        Opc = X86::LOCK_ADD8mi;
1544      else
1545        Opc = X86::LOCK_ADD8mr;
1546    }
1547    break;
1548  case MVT::i16:
1549    if (isInc)
1550      Opc = X86::LOCK_INC16m;
1551    else if (isDec)
1552      Opc = X86::LOCK_DEC16m;
1553    else if (isSub) {
1554      if (isCN) {
1555        if (immSext8(Val.getNode()))
1556          Opc = X86::LOCK_SUB16mi8;
1557        else
1558          Opc = X86::LOCK_SUB16mi;
1559      } else
1560        Opc = X86::LOCK_SUB16mr;
1561    } else {
1562      if (isCN) {
1563        if (immSext8(Val.getNode()))
1564          Opc = X86::LOCK_ADD16mi8;
1565        else
1566          Opc = X86::LOCK_ADD16mi;
1567      } else
1568        Opc = X86::LOCK_ADD16mr;
1569    }
1570    break;
1571  case MVT::i32:
1572    if (isInc)
1573      Opc = X86::LOCK_INC32m;
1574    else if (isDec)
1575      Opc = X86::LOCK_DEC32m;
1576    else if (isSub) {
1577      if (isCN) {
1578        if (immSext8(Val.getNode()))
1579          Opc = X86::LOCK_SUB32mi8;
1580        else
1581          Opc = X86::LOCK_SUB32mi;
1582      } else
1583        Opc = X86::LOCK_SUB32mr;
1584    } else {
1585      if (isCN) {
1586        if (immSext8(Val.getNode()))
1587          Opc = X86::LOCK_ADD32mi8;
1588        else
1589          Opc = X86::LOCK_ADD32mi;
1590      } else
1591        Opc = X86::LOCK_ADD32mr;
1592    }
1593    break;
1594  case MVT::i64:
1595    if (isInc)
1596      Opc = X86::LOCK_INC64m;
1597    else if (isDec)
1598      Opc = X86::LOCK_DEC64m;
1599    else if (isSub) {
1600      Opc = X86::LOCK_SUB64mr;
1601      if (isCN) {
1602        if (immSext8(Val.getNode()))
1603          Opc = X86::LOCK_SUB64mi8;
1604        else if (i64immSExt32(Val.getNode()))
1605          Opc = X86::LOCK_SUB64mi32;
1606      }
1607    } else {
1608      Opc = X86::LOCK_ADD64mr;
1609      if (isCN) {
1610        if (immSext8(Val.getNode()))
1611          Opc = X86::LOCK_ADD64mi8;
1612        else if (i64immSExt32(Val.getNode()))
1613          Opc = X86::LOCK_ADD64mi32;
1614      }
1615    }
1616    break;
1617  }
1618
1619  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
1620                                                 dl, NVT), 0);
1621  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1622  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
1623  if (isInc || isDec) {
1624    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
1625    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 6), 0);
1626    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
1627    SDValue RetVals[] = { Undef, Ret };
1628    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
1629  } else {
1630    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
1631    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
1632    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
1633    SDValue RetVals[] = { Undef, Ret };
1634    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
1635  }
1636}
1637
1638enum AtomicOpc {
1639  OR,
1640  AND,
1641  XOR,
1642  AtomicOpcEnd
1643};
1644
1645enum AtomicSz {
1646  ConstantI8,
1647  I8,
1648  SextConstantI16,
1649  ConstantI16,
1650  I16,
1651  SextConstantI32,
1652  ConstantI32,
1653  I32,
1654  SextConstantI64,
1655  ConstantI64,
1656  I64,
1657  AtomicSzEnd
1658};
1659
1660static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
1661  {
1662    X86::LOCK_OR8mi,
1663    X86::LOCK_OR8mr,
1664    X86::LOCK_OR16mi8,
1665    X86::LOCK_OR16mi,
1666    X86::LOCK_OR16mr,
1667    X86::LOCK_OR32mi8,
1668    X86::LOCK_OR32mi,
1669    X86::LOCK_OR32mr,
1670    X86::LOCK_OR64mi8,
1671    X86::LOCK_OR64mi32,
1672    X86::LOCK_OR64mr
1673  },
1674  {
1675    X86::LOCK_AND8mi,
1676    X86::LOCK_AND8mr,
1677    X86::LOCK_AND16mi8,
1678    X86::LOCK_AND16mi,
1679    X86::LOCK_AND16mr,
1680    X86::LOCK_AND32mi8,
1681    X86::LOCK_AND32mi,
1682    X86::LOCK_AND32mr,
1683    X86::LOCK_AND64mi8,
1684    X86::LOCK_AND64mi32,
1685    X86::LOCK_AND64mr
1686  },
1687  {
1688    X86::LOCK_XOR8mi,
1689    X86::LOCK_XOR8mr,
1690    X86::LOCK_XOR16mi8,
1691    X86::LOCK_XOR16mi,
1692    X86::LOCK_XOR16mr,
1693    X86::LOCK_XOR32mi8,
1694    X86::LOCK_XOR32mi,
1695    X86::LOCK_XOR32mr,
1696    X86::LOCK_XOR64mi8,
1697    X86::LOCK_XOR64mi32,
1698    X86::LOCK_XOR64mr
1699  }
1700};
1701
1702SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
1703  if (Node->hasAnyUseOfValue(0))
1704    return 0;
1705
1706  // Optimize common patterns for __sync_or_and_fetch and similar arith
1707  // operations where the result is not used. This allows us to use the "lock"
1708  // version of the arithmetic instruction.
1709  // FIXME: Same as for 'add' and 'sub', try to merge those down here.
1710  SDValue Chain = Node->getOperand(0);
1711  SDValue Ptr = Node->getOperand(1);
1712  SDValue Val = Node->getOperand(2);
1713  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
1714  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
1715    return 0;
1716
1717  // Which index into the table.
1718  enum AtomicOpc Op;
1719  switch (Node->getOpcode()) {
1720    case ISD::ATOMIC_LOAD_OR:
1721      Op = OR;
1722      break;
1723    case ISD::ATOMIC_LOAD_AND:
1724      Op = AND;
1725      break;
1726    case ISD::ATOMIC_LOAD_XOR:
1727      Op = XOR;
1728      break;
1729    default:
1730      return 0;
1731  }
1732
1733  bool isCN = false;
1734  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
1735  if (CN && (int32_t)CN->getSExtValue() == CN->getSExtValue()) {
1736    isCN = true;
1737    Val = CurDAG->getTargetConstant(CN->getSExtValue(), NVT);
1738  }
1739
1740  unsigned Opc = 0;
1741  switch (NVT.getSimpleVT().SimpleTy) {
1742    default: return 0;
1743    case MVT::i8:
1744      if (isCN)
1745        Opc = AtomicOpcTbl[Op][ConstantI8];
1746      else
1747        Opc = AtomicOpcTbl[Op][I8];
1748      break;
1749    case MVT::i16:
1750      if (isCN) {
1751        if (immSext8(Val.getNode()))
1752          Opc = AtomicOpcTbl[Op][SextConstantI16];
1753        else
1754          Opc = AtomicOpcTbl[Op][ConstantI16];
1755      } else
1756        Opc = AtomicOpcTbl[Op][I16];
1757      break;
1758    case MVT::i32:
1759      if (isCN) {
1760        if (immSext8(Val.getNode()))
1761          Opc = AtomicOpcTbl[Op][SextConstantI32];
1762        else
1763          Opc = AtomicOpcTbl[Op][ConstantI32];
1764      } else
1765        Opc = AtomicOpcTbl[Op][I32];
1766      break;
1767    case MVT::i64:
1768      Opc = AtomicOpcTbl[Op][I64];
1769      if (isCN) {
1770        if (immSext8(Val.getNode()))
1771          Opc = AtomicOpcTbl[Op][SextConstantI64];
1772        else if (i64immSExt32(Val.getNode()))
1773          Opc = AtomicOpcTbl[Op][ConstantI64];
1774      }
1775      break;
1776  }
1777
1778  assert(Opc != 0 && "Invalid arith lock transform!");
1779
1780  DebugLoc dl = Node->getDebugLoc();
1781  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
1782                                                 dl, NVT), 0);
1783  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1784  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
1785  SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
1786  SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
1787  cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
1788  SDValue RetVals[] = { Undef, Ret };
1789  return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
1790}
1791
1792/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
1793/// any uses which require the SF or OF bits to be accurate.
1794static bool HasNoSignedComparisonUses(SDNode *N) {
1795  // Examine each user of the node.
1796  for (SDNode::use_iterator UI = N->use_begin(),
1797         UE = N->use_end(); UI != UE; ++UI) {
1798    // Only examine CopyToReg uses.
1799    if (UI->getOpcode() != ISD::CopyToReg)
1800      return false;
1801    // Only examine CopyToReg uses that copy to EFLAGS.
1802    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
1803          X86::EFLAGS)
1804      return false;
1805    // Examine each user of the CopyToReg use.
1806    for (SDNode::use_iterator FlagUI = UI->use_begin(),
1807           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
1808      // Only examine the Flag result.
1809      if (FlagUI.getUse().getResNo() != 1) continue;
1810      // Anything unusual: assume conservatively.
1811      if (!FlagUI->isMachineOpcode()) return false;
1812      // Examine the opcode of the user.
1813      switch (FlagUI->getMachineOpcode()) {
1814      // These comparisons don't treat the most significant bit specially.
1815      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
1816      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
1817      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
1818      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
1819      case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
1820      case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
1821      case X86::CMOVA16rr: case X86::CMOVA16rm:
1822      case X86::CMOVA32rr: case X86::CMOVA32rm:
1823      case X86::CMOVA64rr: case X86::CMOVA64rm:
1824      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
1825      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
1826      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
1827      case X86::CMOVB16rr: case X86::CMOVB16rm:
1828      case X86::CMOVB32rr: case X86::CMOVB32rm:
1829      case X86::CMOVB64rr: case X86::CMOVB64rm:
1830      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
1831      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
1832      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
1833      case X86::CMOVE16rr: case X86::CMOVE16rm:
1834      case X86::CMOVE32rr: case X86::CMOVE32rm:
1835      case X86::CMOVE64rr: case X86::CMOVE64rm:
1836      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
1837      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
1838      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
1839      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
1840      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
1841      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
1842      case X86::CMOVP16rr: case X86::CMOVP16rm:
1843      case X86::CMOVP32rr: case X86::CMOVP32rm:
1844      case X86::CMOVP64rr: case X86::CMOVP64rm:
1845        continue;
1846      // Anything else: assume conservatively.
1847      default: return false;
1848      }
1849    }
1850  }
1851  return true;
1852}
1853
1854/// isLoadIncOrDecStore - Check whether or not the chain ending in StoreNode
1855/// is suitable for doing the {load; increment or decrement; store} to modify
1856/// transformation.
1857static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
1858                                SDValue StoredVal, SelectionDAG *CurDAG,
1859                                LoadSDNode* &LoadNode, SDValue &InputChain) {
1860
1861  // is the value stored the result of a DEC or INC?
1862  if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;
1863
1864  // is the stored value result 0 of the load?
1865  if (StoredVal.getResNo() != 0) return false;
1866
1867  // are there other uses of the loaded value than the inc or dec?
1868  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
1869
1870  // is the store non-extending and non-indexed?
1871  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
1872    return false;
1873
1874  SDValue Load = StoredVal->getOperand(0);
1875  // Is the stored value a non-extending and non-indexed load?
1876  if (!ISD::isNormalLoad(Load.getNode())) return false;
1877
1878  // Return LoadNode by reference.
1879  LoadNode = cast<LoadSDNode>(Load);
1880  // is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
1881  EVT LdVT = LoadNode->getMemoryVT();
1882  if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
1883      LdVT != MVT::i8)
1884    return false;
1885
1886  // Is store the only read of the loaded value?
1887  if (!Load.hasOneUse())
1888    return false;
1889
1890  // Is the address of the store the same as the load?
1891  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
1892      LoadNode->getOffset() != StoreNode->getOffset())
1893    return false;
1894
1895  // Check if the chain is produced by the load or is a TokenFactor with
1896  // the load output chain as an operand. Return InputChain by reference.
1897  SDValue Chain = StoreNode->getChain();
1898
1899  bool ChainCheck = false;
1900  if (Chain == Load.getValue(1)) {
1901    ChainCheck = true;
1902    InputChain = LoadNode->getChain();
1903  } else if (Chain.getOpcode() == ISD::TokenFactor) {
1904    SmallVector<SDValue, 4> ChainOps;
1905    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
1906      SDValue Op = Chain.getOperand(i);
1907      if (Op == Load.getValue(1)) {
1908        ChainCheck = true;
1909        continue;
1910      }
1911
1912      // Make sure using Op as part of the chain would not cause a cycle here.
1913      // In theory, we could check whether the chain node is a predecessor of
1914      // the load. But that can be very expensive. Instead visit the uses and
1915      // make sure they all have smaller node id than the load.
1916      int LoadId = LoadNode->getNodeId();
1917      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
1918             UE = UI->use_end(); UI != UE; ++UI) {
1919        if (UI.getUse().getResNo() != 0)
1920          continue;
1921        if (UI->getNodeId() > LoadId)
1922          return false;
1923      }
1924
1925      ChainOps.push_back(Op);
1926    }
1927
1928    if (ChainCheck)
1929      // Make a new TokenFactor with all the other input chains except
1930      // for the load.
1931      InputChain = CurDAG->getNode(ISD::TokenFactor, Chain.getDebugLoc(),
1932                                   MVT::Other, &ChainOps[0], ChainOps.size());
1933  }
1934  if (!ChainCheck)
1935    return false;
1936
1937  return true;
1938}
1939
1940/// getFusedLdStOpcode - Get the appropriate X86 opcode for an in memory
1941/// increment or decrement. Opc should be X86ISD::DEC or X86ISD::INC.
1942static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
1943  if (Opc == X86ISD::DEC) {
1944    if (LdVT == MVT::i64) return X86::DEC64m;
1945    if (LdVT == MVT::i32) return X86::DEC32m;
1946    if (LdVT == MVT::i16) return X86::DEC16m;
1947    if (LdVT == MVT::i8)  return X86::DEC8m;
1948  } else {
1949    assert(Opc == X86ISD::INC && "unrecognized opcode");
1950    if (LdVT == MVT::i64) return X86::INC64m;
1951    if (LdVT == MVT::i32) return X86::INC32m;
1952    if (LdVT == MVT::i16) return X86::INC16m;
1953    if (LdVT == MVT::i8)  return X86::INC8m;
1954  }
1955  llvm_unreachable("unrecognized size for LdVT");
1956}
1957
1958/// SelectGather - Customized ISel for GATHER operations.
1959///
1960SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) {
1961  // Operands of Gather: VSrc, Base, VIdx, VMask, Scale
1962  SDValue Chain = Node->getOperand(0);
1963  SDValue VSrc = Node->getOperand(2);
1964  SDValue Base = Node->getOperand(3);
1965  SDValue VIdx = Node->getOperand(4);
1966  SDValue VMask = Node->getOperand(5);
1967  ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
1968  if (!Scale)
1969    return 0;
1970
1971  SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
1972                                   MVT::Other);
1973
1974  // Memory Operands: Base, Scale, Index, Disp, Segment
1975  SDValue Disp = CurDAG->getTargetConstant(0, MVT::i32);
1976  SDValue Segment = CurDAG->getRegister(0, MVT::i32);
1977  const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx,
1978                          Disp, Segment, VMask, Chain};
1979  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
1980                                           VTs, Ops, array_lengthof(Ops));
1981  // Node has 2 outputs: VDst and MVT::Other.
1982  // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other.
1983  // We replace VDst of Node with VDst of ResNode, and Other of Node with Other
1984  // of ResNode.
1985  ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
1986  ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2));
1987  return ResNode;
1988}
1989
1990SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
1991  EVT NVT = Node->getValueType(0);
1992  unsigned Opc, MOpc;
1993  unsigned Opcode = Node->getOpcode();
1994  DebugLoc dl = Node->getDebugLoc();
1995
1996  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');
1997
1998  if (Node->isMachineOpcode()) {
1999    DEBUG(dbgs() << "== ";  Node->dump(CurDAG); dbgs() << '\n');
2000    return NULL;   // Already selected.
2001  }
2002
2003  switch (Opcode) {
2004  default: break;
2005  case ISD::INTRINSIC_W_CHAIN: {
2006    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2007    switch (IntNo) {
2008    default: break;
2009    case Intrinsic::x86_avx2_gather_d_pd:
2010    case Intrinsic::x86_avx2_gather_d_pd_256:
2011    case Intrinsic::x86_avx2_gather_q_pd:
2012    case Intrinsic::x86_avx2_gather_q_pd_256:
2013    case Intrinsic::x86_avx2_gather_d_ps:
2014    case Intrinsic::x86_avx2_gather_d_ps_256:
2015    case Intrinsic::x86_avx2_gather_q_ps:
2016    case Intrinsic::x86_avx2_gather_q_ps_256:
2017    case Intrinsic::x86_avx2_gather_d_q:
2018    case Intrinsic::x86_avx2_gather_d_q_256:
2019    case Intrinsic::x86_avx2_gather_q_q:
2020    case Intrinsic::x86_avx2_gather_q_q_256:
2021    case Intrinsic::x86_avx2_gather_d_d:
2022    case Intrinsic::x86_avx2_gather_d_d_256:
2023    case Intrinsic::x86_avx2_gather_q_d:
2024    case Intrinsic::x86_avx2_gather_q_d_256: {
2025      unsigned Opc;
2026      switch (IntNo) {
2027      default: llvm_unreachable("Impossible intrinsic");
2028      case Intrinsic::x86_avx2_gather_d_pd:     Opc = X86::VGATHERDPDrm;  break;
2029      case Intrinsic::x86_avx2_gather_d_pd_256: Opc = X86::VGATHERDPDYrm; break;
2030      case Intrinsic::x86_avx2_gather_q_pd:     Opc = X86::VGATHERQPDrm;  break;
2031      case Intrinsic::x86_avx2_gather_q_pd_256: Opc = X86::VGATHERQPDYrm; break;
2032      case Intrinsic::x86_avx2_gather_d_ps:     Opc = X86::VGATHERDPSrm;  break;
2033      case Intrinsic::x86_avx2_gather_d_ps_256: Opc = X86::VGATHERDPSYrm; break;
2034      case Intrinsic::x86_avx2_gather_q_ps:     Opc = X86::VGATHERQPSrm;  break;
2035      case Intrinsic::x86_avx2_gather_q_ps_256: Opc = X86::VGATHERQPSYrm; break;
2036      case Intrinsic::x86_avx2_gather_d_q:      Opc = X86::VPGATHERDQrm;  break;
2037      case Intrinsic::x86_avx2_gather_d_q_256:  Opc = X86::VPGATHERDQYrm; break;
2038      case Intrinsic::x86_avx2_gather_q_q:      Opc = X86::VPGATHERQQrm;  break;
2039      case Intrinsic::x86_avx2_gather_q_q_256:  Opc = X86::VPGATHERQQYrm; break;
2040      case Intrinsic::x86_avx2_gather_d_d:      Opc = X86::VPGATHERDDrm;  break;
2041      case Intrinsic::x86_avx2_gather_d_d_256:  Opc = X86::VPGATHERDDYrm; break;
2042      case Intrinsic::x86_avx2_gather_q_d:      Opc = X86::VPGATHERQDrm;  break;
2043      case Intrinsic::x86_avx2_gather_q_d_256:  Opc = X86::VPGATHERQDYrm; break;
2044      }
2045      SDNode *RetVal = SelectGather(Node, Opc);
2046      if (RetVal)
2047        // We already called ReplaceUses inside SelectGather.
2048        return NULL;
2049      break;
2050    }
2051    }
2052    break;
2053  }
2054  case X86ISD::GlobalBaseReg:
2055    return getGlobalBaseReg();
2056
2057
2058  case X86ISD::ATOMOR64_DAG:
2059  case X86ISD::ATOMXOR64_DAG:
2060  case X86ISD::ATOMADD64_DAG:
2061  case X86ISD::ATOMSUB64_DAG:
2062  case X86ISD::ATOMNAND64_DAG:
2063  case X86ISD::ATOMAND64_DAG:
2064  case X86ISD::ATOMSWAP64_DAG: {
2065    unsigned Opc;
2066    switch (Opcode) {
2067    default: llvm_unreachable("Impossible opcode");
2068    case X86ISD::ATOMOR64_DAG:   Opc = X86::ATOMOR6432;   break;
2069    case X86ISD::ATOMXOR64_DAG:  Opc = X86::ATOMXOR6432;  break;
2070    case X86ISD::ATOMADD64_DAG:  Opc = X86::ATOMADD6432;  break;
2071    case X86ISD::ATOMSUB64_DAG:  Opc = X86::ATOMSUB6432;  break;
2072    case X86ISD::ATOMNAND64_DAG: Opc = X86::ATOMNAND6432; break;
2073    case X86ISD::ATOMAND64_DAG:  Opc = X86::ATOMAND6432;  break;
2074    case X86ISD::ATOMSWAP64_DAG: Opc = X86::ATOMSWAP6432; break;
2075    }
2076    SDNode *RetVal = SelectAtomic64(Node, Opc);
2077    if (RetVal)
2078      return RetVal;
2079    break;
2080  }
2081
2082  case ISD::ATOMIC_LOAD_ADD: {
2083    SDNode *RetVal = SelectAtomicLoadAdd(Node, NVT);
2084    if (RetVal)
2085      return RetVal;
2086    break;
2087  }
2088  case ISD::ATOMIC_LOAD_XOR:
2089  case ISD::ATOMIC_LOAD_AND:
2090  case ISD::ATOMIC_LOAD_OR: {
2091    SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
2092    if (RetVal)
2093      return RetVal;
2094    break;
2095  }
2096  case ISD::AND:
2097  case ISD::OR:
2098  case ISD::XOR: {
2099    // For operations of the form (x << C1) op C2, check if we can use a smaller
2100    // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
2101    SDValue N0 = Node->getOperand(0);
2102    SDValue N1 = Node->getOperand(1);
2103
2104    if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
2105      break;
2106
2107    // i8 is unshrinkable, i16 should be promoted to i32.
2108    if (NVT != MVT::i32 && NVT != MVT::i64)
2109      break;
2110
2111    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
2112    ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
2113    if (!Cst || !ShlCst)
2114      break;
2115
2116    int64_t Val = Cst->getSExtValue();
2117    uint64_t ShlVal = ShlCst->getZExtValue();
2118
2119    // Make sure that we don't change the operation by removing bits.
2120    // This only matters for OR and XOR, AND is unaffected.
2121    uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1;
2122    if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
2123      break;
2124
2125    unsigned ShlOp, Op;
2126    EVT CstVT = NVT;
2127
2128    // Check the minimum bitwidth for the new constant.
2129    // TODO: AND32ri is the same as AND64ri32 with zext imm.
2130    // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
2131    // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
2132    if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
2133      CstVT = MVT::i8;
2134    else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
2135      CstVT = MVT::i32;
2136
2137    // Bail if there is no smaller encoding.
2138    if (NVT == CstVT)
2139      break;
2140
2141    switch (NVT.getSimpleVT().SimpleTy) {
2142    default: llvm_unreachable("Unsupported VT!");
2143    case MVT::i32:
2144      assert(CstVT == MVT::i8);
2145      ShlOp = X86::SHL32ri;
2146
2147      switch (Opcode) {
2148      default: llvm_unreachable("Impossible opcode");
2149      case ISD::AND: Op = X86::AND32ri8; break;
2150      case ISD::OR:  Op =  X86::OR32ri8; break;
2151      case ISD::XOR: Op = X86::XOR32ri8; break;
2152      }
2153      break;
2154    case MVT::i64:
2155      assert(CstVT == MVT::i8 || CstVT == MVT::i32);
2156      ShlOp = X86::SHL64ri;
2157
2158      switch (Opcode) {
2159      default: llvm_unreachable("Impossible opcode");
2160      case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
2161      case ISD::OR:  Op = CstVT==MVT::i8?  X86::OR64ri8 :  X86::OR64ri32; break;
2162      case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
2163      }
2164      break;
2165    }
2166
2167    // Emit the smaller op and the shift.
2168    SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
2169    SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst);
2170    return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
2171                                getI8Imm(ShlVal));
2172  }
2173  case X86ISD::UMUL: {
2174    SDValue N0 = Node->getOperand(0);
2175    SDValue N1 = Node->getOperand(1);
2176
2177    unsigned LoReg;
2178    switch (NVT.getSimpleVT().SimpleTy) {
2179    default: llvm_unreachable("Unsupported VT!");
2180    case MVT::i8:  LoReg = X86::AL;  Opc = X86::MUL8r; break;
2181    case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
2182    case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
2183    case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
2184    }
2185
2186    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
2187                                          N0, SDValue()).getValue(1);
2188
2189    SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
2190    SDValue Ops[] = {N1, InFlag};
2191    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2);
2192
2193    ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
2194    ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
2195    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
2196    return NULL;
2197  }
2198
2199  case ISD::SMUL_LOHI:
2200  case ISD::UMUL_LOHI: {
2201    SDValue N0 = Node->getOperand(0);
2202    SDValue N1 = Node->getOperand(1);
2203
2204    bool isSigned = Opcode == ISD::SMUL_LOHI;
2205    if (!isSigned) {
2206      switch (NVT.getSimpleVT().SimpleTy) {
2207      default: llvm_unreachable("Unsupported VT!");
2208      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
2209      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
2210      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
2211      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
2212      }
2213    } else {
2214      switch (NVT.getSimpleVT().SimpleTy) {
2215      default: llvm_unreachable("Unsupported VT!");
2216      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
2217      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
2218      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
2219      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
2220      }
2221    }
2222
2223    unsigned LoReg, HiReg;
2224    switch (NVT.getSimpleVT().SimpleTy) {
2225    default: llvm_unreachable("Unsupported VT!");
2226    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
2227    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
2228    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
2229    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
2230    }
2231
2232    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
2233    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
2234    // Multiply is commmutative.
2235    if (!foldedLoad) {
2236      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
2237      if (foldedLoad)
2238        std::swap(N0, N1);
2239    }
2240
2241    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
2242                                          N0, SDValue()).getValue(1);
2243
2244    if (foldedLoad) {
2245      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
2246                        InFlag };
2247      SDNode *CNode =
2248        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
2249                               array_lengthof(Ops));
2250      InFlag = SDValue(CNode, 1);
2251
2252      // Update the chain.
2253      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
2254    } else {
2255      SDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag);
2256      InFlag = SDValue(CNode, 0);
2257    }
2258
2259    // Prevent use of AH in a REX instruction by referencing AX instead.
2260    if (HiReg == X86::AH && Subtarget->is64Bit() &&
2261        !SDValue(Node, 1).use_empty()) {
2262      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2263                                              X86::AX, MVT::i16, InFlag);
2264      InFlag = Result.getValue(2);
2265      // Get the low part if needed. Don't use getCopyFromReg for aliasing
2266      // registers.
2267      if (!SDValue(Node, 0).use_empty())
2268        ReplaceUses(SDValue(Node, 1),
2269          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
2270
2271      // Shift AX down 8 bits.
2272      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
2273                                              Result,
2274                                     CurDAG->getTargetConstant(8, MVT::i8)), 0);
2275      // Then truncate it down to i8.
2276      ReplaceUses(SDValue(Node, 1),
2277        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
2278    }
2279    // Copy the low half of the result, if it is needed.
2280    if (!SDValue(Node, 0).use_empty()) {
2281      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2282                                              LoReg, NVT, InFlag);
2283      InFlag = Result.getValue(2);
2284      ReplaceUses(SDValue(Node, 0), Result);
2285      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
2286    }
2287    // Copy the high half of the result, if it is needed.
2288    if (!SDValue(Node, 1).use_empty()) {
2289      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2290                                              HiReg, NVT, InFlag);
2291      InFlag = Result.getValue(2);
2292      ReplaceUses(SDValue(Node, 1), Result);
2293      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
2294    }
2295
2296    return NULL;
2297  }
2298
2299  case ISD::SDIVREM:
2300  case ISD::UDIVREM: {
2301    SDValue N0 = Node->getOperand(0);
2302    SDValue N1 = Node->getOperand(1);
2303
2304    bool isSigned = Opcode == ISD::SDIVREM;
2305    if (!isSigned) {
2306      switch (NVT.getSimpleVT().SimpleTy) {
2307      default: llvm_unreachable("Unsupported VT!");
2308      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
2309      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
2310      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
2311      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
2312      }
2313    } else {
2314      switch (NVT.getSimpleVT().SimpleTy) {
2315      default: llvm_unreachable("Unsupported VT!");
2316      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
2317      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
2318      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
2319      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
2320      }
2321    }
2322
2323    unsigned LoReg, HiReg, ClrReg;
2324    unsigned ClrOpcode, SExtOpcode;
2325    switch (NVT.getSimpleVT().SimpleTy) {
2326    default: llvm_unreachable("Unsupported VT!");
2327    case MVT::i8:
2328      LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
2329      ClrOpcode  = 0;
2330      SExtOpcode = X86::CBW;
2331      break;
2332    case MVT::i16:
2333      LoReg = X86::AX;  HiReg = X86::DX;
2334      ClrOpcode  = X86::MOV16r0; ClrReg = X86::DX;
2335      SExtOpcode = X86::CWD;
2336      break;
2337    case MVT::i32:
2338      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
2339      ClrOpcode  = X86::MOV32r0;
2340      SExtOpcode = X86::CDQ;
2341      break;
2342    case MVT::i64:
2343      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
2344      ClrOpcode  = X86::MOV64r0;
2345      SExtOpcode = X86::CQO;
2346      break;
2347    }
2348
2349    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
2350    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
2351    bool signBitIsZero = CurDAG->SignBitIsZero(N0);
2352
2353    SDValue InFlag;
2354    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
2355      // Special case for div8, just use a move with zero extension to AX to
2356      // clear the upper 8 bits (AH).
2357      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
2358      if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
2359        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
2360        Move =
2361          SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
2362                                         MVT::Other, Ops,
2363                                         array_lengthof(Ops)), 0);
2364        Chain = Move.getValue(1);
2365        ReplaceUses(N0.getValue(1), Chain);
2366      } else {
2367        Move =
2368          SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),0);
2369        Chain = CurDAG->getEntryNode();
2370      }
2371      Chain  = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
2372      InFlag = Chain.getValue(1);
2373    } else {
2374      InFlag =
2375        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
2376                             LoReg, N0, SDValue()).getValue(1);
2377      if (isSigned && !signBitIsZero) {
2378        // Sign extend the low part into the high part.
2379        InFlag =
2380          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
2381      } else {
2382        // Zero out the high part, effectively zero extending the input.
2383        SDValue ClrNode =
2384          SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
2385        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
2386                                      ClrNode, InFlag).getValue(1);
2387      }
2388    }
2389
2390    if (foldedLoad) {
2391      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
2392                        InFlag };
2393      SDNode *CNode =
2394        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
2395                               array_lengthof(Ops));
2396      InFlag = SDValue(CNode, 1);
2397      // Update the chain.
2398      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
2399    } else {
2400      InFlag =
2401        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
2402    }
2403
2404    // Prevent use of AH in a REX instruction by referencing AX instead.
2405    // Shift it down 8 bits.
2406    if (HiReg == X86::AH && Subtarget->is64Bit() &&
2407        !SDValue(Node, 1).use_empty()) {
2408      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2409                                              X86::AX, MVT::i16, InFlag);
2410      InFlag = Result.getValue(2);
2411
2412      // If we also need AL (the quotient), get it by extracting a subreg from
2413      // Result. The fast register allocator does not like multiple CopyFromReg
2414      // nodes using aliasing registers.
2415      if (!SDValue(Node, 0).use_empty())
2416        ReplaceUses(SDValue(Node, 0),
2417          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
2418
2419      // Shift AX right by 8 bits instead of using AH.
2420      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
2421                                         Result,
2422                                         CurDAG->getTargetConstant(8, MVT::i8)),
2423                       0);
2424      ReplaceUses(SDValue(Node, 1),
2425        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
2426    }
2427    // Copy the division (low) result, if it is needed.
2428    if (!SDValue(Node, 0).use_empty()) {
2429      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2430                                                LoReg, NVT, InFlag);
2431      InFlag = Result.getValue(2);
2432      ReplaceUses(SDValue(Node, 0), Result);
2433      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
2434    }
2435    // Copy the remainder (high) result, if it is needed.
2436    if (!SDValue(Node, 1).use_empty()) {
2437      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2438                                              HiReg, NVT, InFlag);
2439      InFlag = Result.getValue(2);
2440      ReplaceUses(SDValue(Node, 1), Result);
2441      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
2442    }
2443    return NULL;
2444  }
2445
2446  case X86ISD::CMP:
2447  case X86ISD::SUB: {
2448    // Sometimes a SUB is used to perform comparison.
2449    if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0))
2450      // This node is not a CMP.
2451      break;
2452    SDValue N0 = Node->getOperand(0);
2453    SDValue N1 = Node->getOperand(1);
2454
2455    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
2456    // use a smaller encoding.
2457    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
2458        HasNoSignedComparisonUses(Node))
2459      // Look past the truncate if CMP is the only use of it.
2460      N0 = N0.getOperand(0);
2461    if ((N0.getNode()->getOpcode() == ISD::AND ||
2462         (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
2463        N0.getNode()->hasOneUse() &&
2464        N0.getValueType() != MVT::i8 &&
2465        X86::isZeroNode(N1)) {
2466      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
2467      if (!C) break;
2468
2469      // For example, convert "testl %eax, $8" to "testb %al, $8"
2470      if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
2471          (!(C->getZExtValue() & 0x80) ||
2472           HasNoSignedComparisonUses(Node))) {
2473        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
2474        SDValue Reg = N0.getNode()->getOperand(0);
2475
2476        // On x86-32, only the ABCD registers have 8-bit subregisters.
2477        if (!Subtarget->is64Bit()) {
2478          const TargetRegisterClass *TRC;
2479          switch (N0.getValueType().getSimpleVT().SimpleTy) {
2480          case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
2481          case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
2482          default: llvm_unreachable("Unsupported TEST operand type!");
2483          }
2484          SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
2485          Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
2486                                               Reg.getValueType(), Reg, RC), 0);
2487        }
2488
2489        // Extract the l-register.
2490        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
2491                                                        MVT::i8, Reg);
2492
2493        // Emit a testb.
2494        return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, Subreg, Imm);
2495      }
2496
2497      // For example, "testl %eax, $2048" to "testb %ah, $8".
2498      if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
2499          (!(C->getZExtValue() & 0x8000) ||
2500           HasNoSignedComparisonUses(Node))) {
2501        // Shift the immediate right by 8 bits.
2502        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
2503                                                       MVT::i8);
2504        SDValue Reg = N0.getNode()->getOperand(0);
2505
2506        // Put the value in an ABCD register.
2507        const TargetRegisterClass *TRC;
2508        switch (N0.getValueType().getSimpleVT().SimpleTy) {
2509        case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
2510        case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
2511        case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
2512        default: llvm_unreachable("Unsupported TEST operand type!");
2513        }
2514        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
2515        Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
2516                                             Reg.getValueType(), Reg, RC), 0);
2517
2518        // Extract the h-register.
2519        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
2520                                                        MVT::i8, Reg);
2521
2522        // Emit a testb.  The EXTRACT_SUBREG becomes a COPY that can only
2523        // target GR8_NOREX registers, so make sure the register class is
2524        // forced.
2525        return CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl, MVT::i32,
2526                                      Subreg, ShiftedImm);
2527      }
2528
2529      // For example, "testl %eax, $32776" to "testw %ax, $32776".
2530      if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
2531          N0.getValueType() != MVT::i16 &&
2532          (!(C->getZExtValue() & 0x8000) ||
2533           HasNoSignedComparisonUses(Node))) {
2534        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
2535        SDValue Reg = N0.getNode()->getOperand(0);
2536
2537        // Extract the 16-bit subregister.
2538        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
2539                                                        MVT::i16, Reg);
2540
2541        // Emit a testw.
2542        return CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, Subreg, Imm);
2543      }
2544
2545      // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
2546      if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
2547          N0.getValueType() == MVT::i64 &&
2548          (!(C->getZExtValue() & 0x80000000) ||
2549           HasNoSignedComparisonUses(Node))) {
2550        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
2551        SDValue Reg = N0.getNode()->getOperand(0);
2552
2553        // Extract the 32-bit subregister.
2554        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
2555                                                        MVT::i32, Reg);
2556
2557        // Emit a testl.
2558        return CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, Subreg, Imm);
2559      }
2560    }
2561    break;
2562  }
2563  case ISD::STORE: {
2564    // Change a chain of {load; incr or dec; store} of the same value into
2565    // a simple increment or decrement through memory of that value, if the
2566    // uses of the modified value and its address are suitable.
2567    // The DEC64m tablegen pattern is currently not able to match the case where
2568    // the EFLAGS on the original DEC are used. (This also applies to
2569    // {INC,DEC}X{64,32,16,8}.)
2570    // We'll need to improve tablegen to allow flags to be transferred from a
2571    // node in the pattern to the result node.  probably with a new keyword
2572    // for example, we have this
2573    // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2574    //  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2575    //   (implicit EFLAGS)]>;
2576    // but maybe need something like this
2577    // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2578    //  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2579    //   (transferrable EFLAGS)]>;
2580
2581    StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
2582    SDValue StoredVal = StoreNode->getOperand(1);
2583    unsigned Opc = StoredVal->getOpcode();
2584
2585    LoadSDNode *LoadNode = 0;
2586    SDValue InputChain;
2587    if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
2588                             LoadNode, InputChain))
2589      break;
2590
2591    SDValue Base, Scale, Index, Disp, Segment;
2592    if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
2593                    Base, Scale, Index, Disp, Segment))
2594      break;
2595
2596    MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
2597    MemOp[0] = StoreNode->getMemOperand();
2598    MemOp[1] = LoadNode->getMemOperand();
2599    const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
2600    EVT LdVT = LoadNode->getMemoryVT();
2601    unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
2602    MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
2603                                                   Node->getDebugLoc(),
2604                                                   MVT::i32, MVT::Other, Ops,
2605                                                   array_lengthof(Ops));
2606    Result->setMemRefs(MemOp, MemOp + 2);
2607
2608    ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
2609    ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
2610
2611    return Result;
2612  }
2613
2614  // FIXME: Custom handling because TableGen doesn't support multiple implicit
2615  // defs in an instruction pattern
2616  case X86ISD::PCMPESTRI: {
2617    SDValue N0 = Node->getOperand(0);
2618    SDValue N1 = Node->getOperand(1);
2619    SDValue N2 = Node->getOperand(2);
2620    SDValue N3 = Node->getOperand(3);
2621    SDValue N4 = Node->getOperand(4);
2622
2623    // Make sure last argument is a constant
2624    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N4);
2625    if (!Cst)
2626      break;
2627
2628    uint64_t Imm = Cst->getZExtValue();
2629
2630    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
2631                                          X86::EAX, N1, SDValue()).getValue(1);
2632    InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX,
2633                                  N3, InFlag).getValue(1);
2634
2635    SDValue Ops[] = { N0, N2, getI8Imm(Imm), InFlag };
2636    unsigned Opc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr :
2637                                         X86::PCMPESTRIrr;
2638    InFlag = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Ops,
2639                                            array_lengthof(Ops)), 0);
2640
2641    if (!SDValue(Node, 0).use_empty()) {
2642      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2643                                              X86::ECX, NVT, InFlag);
2644      InFlag = Result.getValue(2);
2645      ReplaceUses(SDValue(Node, 0), Result);
2646    }
2647    if (!SDValue(Node, 1).use_empty()) {
2648      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2649                                              X86::EFLAGS, NVT, InFlag);
2650      InFlag = Result.getValue(2);
2651      ReplaceUses(SDValue(Node, 1), Result);
2652    }
2653
2654    return NULL;
2655  }
2656
2657  // FIXME: Custom handling because TableGen doesn't support multiple implicit
2658  // defs in an instruction pattern
2659  case X86ISD::PCMPISTRI: {
2660    SDValue N0 = Node->getOperand(0);
2661    SDValue N1 = Node->getOperand(1);
2662    SDValue N2 = Node->getOperand(2);
2663
2664    // Make sure last argument is a constant
2665    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N2);
2666    if (!Cst)
2667      break;
2668
2669    uint64_t Imm = Cst->getZExtValue();
2670
2671    SDValue Ops[] = { N0, N1, getI8Imm(Imm) };
2672    unsigned Opc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr :
2673                                         X86::PCMPISTRIrr;
2674    SDValue InFlag = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Ops,
2675                                                    array_lengthof(Ops)), 0);
2676
2677    if (!SDValue(Node, 0).use_empty()) {
2678      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2679                                              X86::ECX, NVT, InFlag);
2680      InFlag = Result.getValue(2);
2681      ReplaceUses(SDValue(Node, 0), Result);
2682    }
2683    if (!SDValue(Node, 1).use_empty()) {
2684      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2685                                              X86::EFLAGS, NVT, InFlag);
2686      InFlag = Result.getValue(2);
2687      ReplaceUses(SDValue(Node, 1), Result);
2688    }
2689
2690    return NULL;
2691  }
2692  }
2693
2694  SDNode *ResNode = SelectCode(Node);
2695
2696  DEBUG(dbgs() << "=> ";
2697        if (ResNode == NULL || ResNode == Node)
2698          Node->dump(CurDAG);
2699        else
2700          ResNode->dump(CurDAG);
2701        dbgs() << '\n');
2702
2703  return ResNode;
2704}
2705
2706bool X86DAGToDAGISel::
2707SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
2708                             std::vector<SDValue> &OutOps) {
2709  SDValue Op0, Op1, Op2, Op3, Op4;
2710  switch (ConstraintCode) {
2711  case 'o':   // offsetable        ??
2712  case 'v':   // not offsetable    ??
2713  default: return true;
2714  case 'm':   // memory
2715    if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4))
2716      return true;
2717    break;
2718  }
2719
2720  OutOps.push_back(Op0);
2721  OutOps.push_back(Op1);
2722  OutOps.push_back(Op2);
2723  OutOps.push_back(Op3);
2724  OutOps.push_back(Op4);
2725  return false;
2726}
2727
2728/// createX86ISelDag - This pass converts a legalized DAG into a
2729/// X86-specific DAG, ready for instruction scheduling.
2730///
2731FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
2732                                     CodeGenOpt::Level OptLevel) {
2733  return new X86DAGToDAGISel(TM, OptLevel);
2734}
2735