X86ISelDAGToDAG.cpp revision 9989a63818581bcfeed730e42055af425fdf9353
//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized DAG to an X86 DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValue's instead of register numbers for the leaves of the matched
  /// tree.
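  ///
  /// For illustration (a sketch, not part of the original comment): an x86
  /// memory operand of the form
  ///   [Base_Reg + Scale*IndexReg + Disp]
  /// such as "movl 16(%rbx,%rcx,4), %eax" corresponds to Base_Reg = %rbx,
  /// Scale = 4, IndexReg = %rcx, and Disp = 16, except that the leaves here
  /// are SDValues rather than physical registers.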
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    int JT;
    unsigned Align;    // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
        SymbolFlags(X86II::MO_NO_FLAG) {
    }

    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
    }

    bool hasBaseOrIndexReg() const {
      return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
    }

    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode() != 0)
        Base_Reg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
             << " Scale " << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode() != 0)
        IndexReg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " JT " << JT << " Align " << Align << '\n';
    }
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel : public SelectionDAGISel {
    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    const X86TargetLowering &X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        X86Lowering(*tm.getTargetLowering()),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    virtual void EmitFunctionEntryCode();

    virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;

    virtual void PreprocessISelDAG();

    inline bool immSext8(SDNode *N) const {
      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    }

    // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
    // sign extended field.
    inline bool i64immSExt32(SDNode *N) const {
      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
      return (int64_t)v == (int32_t)v;
    }

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    SDNode *Select(SDNode *N);
    SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
    SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT);
    SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT);

    bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool SelectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool SelectScalarSSELoad(SDNode *Root, SDValue N,
                             SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &NodeWithChain);

    bool TryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                              char ConstraintCode,
                                              std::vector<SDValue> &OutOps);

    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);

    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                   SDValue &Scale, SDValue &Index,
                                   SDValue &Disp, SDValue &Segment) {
      Base  = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
        CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) :
        AM.Base_Reg;
      Scale = getI8Imm(AM.Scale);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES)
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      else if (AM.JT != -1)
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      else if (AM.BlockAddr)
        Disp = CurDAG->getBlockAddress(AM.BlockAddr, MVT::i32,
                                       true, AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }

    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() {
      return getTargetMachine().getInstrInfo();
    }
  };
}


bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::SUB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl    %gs:0, %eax
      // leal    i@NTPOFF(%eax), %eax
      // instead of
      // movl    $i@NTPOFF, %eax
      // addl    %gs:0, %eax
      // If the block also has an access to a second TLS address, this will
      // save a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}

/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// the load's chain operand and move the load below the call's chain operand.
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                                  SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    Ops.push_back(OrigChain.getOperand(i));
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], Ops.size());
}

/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize);

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        (N->getOpcode() == X86ISD::CALL ||
         N->getOpcode() == X86ISD::TC_RETURN)) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to a store
    // and a load from the stack.  This is a gross hack.  We would like to
    // simply mark these as being illegal, but when we do that, legalize
    // produces these when it expands calls, then expands these in the same
    // legalize pass.  We would like dag combine to be able to hack on these
    // between the call expansion and the node legalization.  As such this
    // pass basically does "really late" legalization of these inline with the
    // X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    EVT SrcVT = N->getOperand(0).getValueType();
    EVT DstVT = N->getValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore.  SSE can fold direct loads into other
    // operations.  Based on this, decide what we want to do.
    EVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    DebugLoc dl = N->getDebugLoc();

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, MachinePointerInfo(), MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(),
                                        MemVT, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created.  This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}


/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing()) {
    unsigned CallOp =
      Subtarget->is64Bit() ? X86::WINCALL64pcrel32 : X86::CALLpcrel32;
    BuildMI(BB, DebugLoc(),
            TII->get(CallOp)).addExternalSymbol("__main");
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}
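
// For illustration (example values chosen for this comment): Val = 0x3fffffff
// (2^30 - 1) passes isInt<31>, while 0x40000000 (2^30) is rejected, keeping a
// full bit of headroom below the signed 32-bit displacement limit of 2^31 - 1.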

bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;

}

bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
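  //
  // For illustration (a sketch), IA-32 TLS code typically looks like:
  //   movl %gs:0, %eax           # the word at gs:0 is the thread pointer
  //   leal i@NTPOFF(%eax), %eax  # address of TLS variable i
  // Recognizing the gs:0 load as a segment-relative access lets the first
  // instruction be selected directly.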
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
        Subtarget->isTargetELF())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      }

  return true;
}

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode.  These wrap things that will resolve down into a
/// symbol reference.  If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses.  We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel) &&
      // Base and index reg must be 0 in order to use %rip as base and lowering
      // must allow RIP.
      !AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -static -mcmodel=small mode.  In 64-bit
  // mode, this results in a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      ((M == CodeModel::Small || M == CodeModel::Kernel) &&
       TM.getRelocationModel() == Reloc::Static)) {
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done.  This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0 &&
      AM.IndexReg.getNode() == 0 &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}

// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
    DAG.RepositionNode(Pos.getNode(), N.getNode());
    N.getNode()->setNodeId(Pos.getNode()->getNodeId());
  }
}

// Transform "(X >> (8-C1)) & C2" to "(X >> 8) & 0xff" if safe. This
// allows us to convert the shift and AND into an h-register extract and
// a scaled index. Returns false if the simplification was performed.
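//
// A worked example (illustrative): for "(X >> 6) & 0x3fc", ScaleLog is
// 8 - 6 = 2 and the mask is exactly 0xff << 2, so the expression is rewritten
// as "((X >> 8) & 0xff) << 2"; the trailing shift then becomes Scale = 4 in
// the addressing mode and the 8-bit extract can use an h-register.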
static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  EVT VT = N.getValueType();
  DebugLoc DL = N.getDebugLoc();
  SDValue Eight = DAG.getConstant(8, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, Eight);
  InsertDAGNode(DAG, N, Srl);
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, And);
  InsertDAGNode(DAG, N, ShlCount);
  InsertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}

// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
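//
// A worked example (illustrative): "(X << 1) & 0xff00" becomes
// "(X & 0x7f80) << 1", and the rewritten shift-by-1 is then absorbed into
// the addressing mode as Scale = 2 with (X & 0x7f80) as the index.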
static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // isel mechanism requires their node ids to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  EVT VT = N.getValueType();
  DebugLoc DL = N.getDebugLoc();
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, NewAnd);
  InsertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}

// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %rcx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = CountLeadingZeros_64(Mask);
  unsigned MaskTZ = CountTrailingZeros_64(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that the mask is a contiguous run of bits.
  if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
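
  // A quick sanity check of that arithmetic (illustrative): for Mask = 0x78
  // (0b01111000), MaskTZ = 3, MaskLZ = 57, and Mask >> 3 has 4 trailing ones,
  // so 4 + 3 + 57 == 64 and the mask is accepted; a gappy mask like 0x58
  // (0b01011000) gives 2 + 3 + 57 == 62 and is rejected.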

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  MaskLZ -= (64 - X.getValueSizeInBits()) + ShiftAmt;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits =
      X.getValueSizeInBits() - X.getOperand(0).getValueSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(),
                                               MaskLZ);
  APInt KnownZero, KnownOne;
  DAG.ComputeMaskedBits(X, MaskedHighBits, KnownZero, KnownOne);
  if (MaskedHighBits != KnownZero) return true;

  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  EVT VT = N.getValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, X.getDebugLoc(), VT, X);
    InsertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  DebugLoc DL = N.getDebugLoc();
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewSRLAmt);
  InsertDAGNode(DAG, N, NewSRL);
  InsertDAGNode(DAG, N, NewSHLAmt);
  InsertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}

bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  DebugLoc dl = N.getDebugLoc();
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it.  Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements.  It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!FoldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now.  However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = AddVal->getSExtValue() << Val;
          if (!FoldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
      break;
    }

  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getValueSizeInBits() > 64) break;

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it down.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
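    // For example (illustrative), "x*5" can be selected as
    // "leal (%rax,%rax,4), %ecx": base = x, index = x, Scale = 4.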
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now.  However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (FoldOffsetIntoAddress(Disp, AM))
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address, leaving
    // the index field unused, use -B as the index. This is a win if A has
    // multiple parts that can be folded into the address. Also, this saves
    // a mov if the base register has other uses, since it avoids a
    // two-address sub instruction; however, it costs an additional mov if
    // the index register has other uses.
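    //
    // For illustration (a sketch of the trade-off weighed below): for
    // "GV + x - y", the global and x can fold into the address, so the
    // result can be "negl %ecx" followed by "leal GV(%eax,%ecx)", instead
    // of materializing GV + x and then using a two-address sub for y.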

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    InsertDAGNode(*CurDAG, N, Zero);
    InsertDAGNode(*CurDAG, N, Neg);
    return false;
  }

  case ISD::ADD: {
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;

    // Try again after commuting the operands.
    if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
        !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base_Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      N = Handle.getValue();
      AM.Base_Reg = N.getOperand(0);
      AM.IndexReg = N.getOperand(1);
      AM.Scale = 1;
      return false;
    }
    N = Handle.getValue();
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
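    // For example, "(x << 4) | 5" behaves exactly like "(x << 4) + 5" since
    // the low four bits of the shifted value are known zero, so the constant
    // can be folded into the displacement.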
    if (CurDAG->isBaseWithConstantOffset(N)) {
      X86ISelAddressMode Backup = AM;
      ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));

      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
          !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
        return false;
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
    SDValue X = Shift.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getValueSizeInBits() > 64) break;

    if (!isa<ConstantSDNode>(N.getOperand(1)))
      break;
    uint64_t Mask = N.getConstantOperandVal(1);

    // Try to fold the mask and shift into an extract and scale.
    if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to fold the mask and shift directly into the scale.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to swap the mask and shift to place shifts which can be done as
    // a scale on the outside of the mask.
    if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
      return false;
    break;
  }
  }

  return MatchAddressBase(N, AM);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}

/// SelectAddr - Returns true if it is able to pattern match an addressing
/// mode.
/// It returns the operands which make up the maximal addressing mode it can
/// match by reference.
///
/// Parent is the parent node of the addr operand that is being matched.  It
/// is always a load, store, atomic node, or null.  It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // This list of opcodes covers all the nodes that have an "addr:$ptr" operand
      // that are not a MemSDNode, and thus don't have proper addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL) { // Fixme
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS.
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  }

  if (MatchAddress(N, AM))
    return false;

  EVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectScalarSSELoad - Match a scalar SSE load.  In particular, we want to
/// match a load whose top elements are either undef or zeros.  The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternChainNode: this is the matched node that has a chain input and
///   output.
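///
/// For example (illustrative), a (scalar_to_vector (load addr)) feeding an
/// SSE operation can be selected so that the load folds into the
/// instruction's memory operand, e.g. "addss (%rax), %xmm0" instead of a
/// separate scalar load.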
bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        PatternNodeWithChain.hasOneUse() &&
        IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
        IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements.  This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse() &&
      IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
      IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
    // Okay, this is a zero extending load.  Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    PatternNodeWithChain = SDValue(LD, 0);
    return true;
  }
  return false;
}


/// SelectLEAAddr - This calls SelectAddr and determines if the maximal
/// addressing mode it matches can be cost-effectively emitted as an LEA
/// instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert(T == AM.Segment);
  AM.Segment = Copy;

  EVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base_Reg.getNode())
      Complexity = 1;
    else
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg or to
  // use a simple shift.
1390  if (AM.Scale > 1)
1391    Complexity++;
1392
1393  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
1394  // to a LEA. This is determined with some expermentation but is by no means
1395  // optimal (especially for code size consideration). LEA is nice because of
1396  // its three-address nature. Tweak the cost function again when we can run
1397  // convertToThreeAddress() at register allocation time.
1398  if (AM.hasSymbolicDisplacement()) {
1399    // For X86-64, we should always use lea to materialize RIP relative
1400    // addresses.
1401    if (Subtarget->is64Bit())
1402      Complexity = 4;
1403    else
1404      Complexity += 2;
1405  }
1406
1407  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
1408    Complexity++;
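  // Illustrative tally: a matched mode with a base register (+1), an index
  // register (+1), scale 2 (+1), and a displacement alongside the base or
  // index (+1) scores Complexity = 4, so an LEA such as
  // "leal 8(%reg1,%reg2,2), %dst" is considered worthwhile.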

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

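  // On 32-bit targets the general-dynamic TLS sequence is built around the
  // GOT base in EBX (roughly "leal sym@TLSGD(,%ebx,1), %eax" before calling
  // ___tls_get_addr), hence EBX as the index register below; 64-bit code
  // uses a RIP-relative form and needs no index register.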
  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}


bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P, OptLevel))
    return false;

  return SelectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}

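/// SelectAtomic64 - Select a 64-bit atomic pseudo such as X86::ATOMOR6432
/// (see the callers in Select below): the i64 value operand arrives
/// pre-split into low and high halves (In2L/In2H), and the pseudo produces
/// its result as a pair of i32 values plus a chain.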
SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return NULL;
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
                                           MVT::i32, MVT::i32, MVT::Other, Ops,
                                           array_lengthof(Ops));
  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
  return ResNode;
}

// FIXME: Figure out some way to unify this with the 'or' and other code
// below.
SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  // Optimize common patterns for __sync_add_and_fetch and
  // __sync_sub_and_fetch where the result is not used. This allows us
  // to use the "lock" versions of the add, sub, inc, and dec instructions.
  // FIXME: Do not use special instructions; instead, add the "lock" prefix
  // to the target node somehow. The extra information will then be
  // transferred to the machine instruction, where it denotes the prefix.
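  // For instance (illustrative): an "atomicrmw add i32* %p, i32 1" whose
  // result is unused becomes "lock incl (%p)", an add of -1 becomes
  // "lock decl (%p)", and an add of -5 becomes "lock subl $5, (%p)".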
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  bool isInc = false, isDec = false, isSub = false, isCN = false;
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN && CN->getSExtValue() == (int32_t)CN->getSExtValue()) {
    isCN = true;
    int64_t CNVal = CN->getSExtValue();
    if (CNVal == 1)
      isInc = true;
    else if (CNVal == -1)
      isDec = true;
    else if (CNVal >= 0)
      Val = CurDAG->getTargetConstant(CNVal, NVT);
    else {
      isSub = true;
      Val = CurDAG->getTargetConstant(-CNVal, NVT);
    }
  } else if (Val.hasOneUse() &&
             Val.getOpcode() == ISD::SUB &&
             X86::isZeroNode(Val.getOperand(0))) {
    isSub = true;
    Val = Val.getOperand(1);
  }
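  // The non-constant case above rewrites an atomic add of (sub 0, %x) into
  // a lock'ed subtract of %x itself, so the negation never reaches the
  // selected instruction.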

  DebugLoc dl = Node->getDebugLoc();
  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:
    if (isInc)
      Opc = X86::LOCK_INC8m;
    else if (isDec)
      Opc = X86::LOCK_DEC8m;
    else if (isSub) {
      if (isCN)
        Opc = X86::LOCK_SUB8mi;
      else
        Opc = X86::LOCK_SUB8mr;
    } else {
      if (isCN)
        Opc = X86::LOCK_ADD8mi;
      else
        Opc = X86::LOCK_ADD8mr;
    }
    break;
  case MVT::i16:
    if (isInc)
      Opc = X86::LOCK_INC16m;
    else if (isDec)
      Opc = X86::LOCK_DEC16m;
    else if (isSub) {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB16mi8;
        else
          Opc = X86::LOCK_SUB16mi;
      } else
        Opc = X86::LOCK_SUB16mr;
    } else {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD16mi8;
        else
          Opc = X86::LOCK_ADD16mi;
      } else
        Opc = X86::LOCK_ADD16mr;
    }
    break;
  case MVT::i32:
    if (isInc)
      Opc = X86::LOCK_INC32m;
    else if (isDec)
      Opc = X86::LOCK_DEC32m;
    else if (isSub) {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB32mi8;
        else
          Opc = X86::LOCK_SUB32mi;
      } else
        Opc = X86::LOCK_SUB32mr;
    } else {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD32mi8;
        else
          Opc = X86::LOCK_ADD32mi;
      } else
        Opc = X86::LOCK_ADD32mr;
    }
    break;
  case MVT::i64:
    if (isInc)
      Opc = X86::LOCK_INC64m;
    else if (isDec)
      Opc = X86::LOCK_DEC64m;
    else if (isSub) {
      Opc = X86::LOCK_SUB64mr;
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB64mi8;
        else if (i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_SUB64mi32;
      }
    } else {
      Opc = X86::LOCK_ADD64mr;
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD64mi8;
        else if (i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_ADD64mi32;
      }
    }
    break;
  }

  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  if (isInc || isDec) {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 6), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  } else {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  }
}

enum AtomicOpc {
  OR,
  AND,
  XOR,
  AtomicOpcEnd
};

enum AtomicSz {
  ConstantI8,
  I8,
  SextConstantI16,
  ConstantI16,
  I16,
  SextConstantI32,
  ConstantI32,
  I32,
  SextConstantI64,
  ConstantI64,
  I64,
  AtomicSzEnd
};
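// AtomicOpcTbl below is indexed as AtomicOpcTbl[AtomicOpc][AtomicSz]: the
// first index selects the logical operation and the second the operand form,
// so e.g. AtomicOpcTbl[OR][SextConstantI32] is X86::LOCK_OR32mi8, the 32-bit
// or with a constant that sign-extends from an 8-bit immediate.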

static const unsigned int AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
  {
    X86::LOCK_OR8mi,
    X86::LOCK_OR8mr,
    X86::LOCK_OR16mi8,
    X86::LOCK_OR16mi,
    X86::LOCK_OR16mr,
    X86::LOCK_OR32mi8,
    X86::LOCK_OR32mi,
    X86::LOCK_OR32mr,
    X86::LOCK_OR64mi8,
    X86::LOCK_OR64mi32,
    X86::LOCK_OR64mr
  },
  {
    X86::LOCK_AND8mi,
    X86::LOCK_AND8mr,
    X86::LOCK_AND16mi8,
    X86::LOCK_AND16mi,
    X86::LOCK_AND16mr,
    X86::LOCK_AND32mi8,
    X86::LOCK_AND32mi,
    X86::LOCK_AND32mr,
    X86::LOCK_AND64mi8,
    X86::LOCK_AND64mi32,
    X86::LOCK_AND64mr
  },
  {
    X86::LOCK_XOR8mi,
    X86::LOCK_XOR8mr,
    X86::LOCK_XOR16mi8,
    X86::LOCK_XOR16mi,
    X86::LOCK_XOR16mr,
    X86::LOCK_XOR32mi8,
    X86::LOCK_XOR32mi,
    X86::LOCK_XOR32mr,
    X86::LOCK_XOR64mi8,
    X86::LOCK_XOR64mi32,
    X86::LOCK_XOR64mr
  }
};

SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  // Optimize common patterns for __sync_or_and_fetch and similar arith
  // operations where the result is not used. This allows us to use the "lock"
  // version of the arithmetic instruction.
  // FIXME: Same as for 'add' and 'sub' above; try to merge that handling
  // down here.
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  // Determine which first index into the table to use.
  enum AtomicOpc Op;
  switch (Node->getOpcode()) {
    case ISD::ATOMIC_LOAD_OR:
      Op = OR;
      break;
    case ISD::ATOMIC_LOAD_AND:
      Op = AND;
      break;
    case ISD::ATOMIC_LOAD_XOR:
      Op = XOR;
      break;
    default:
      return 0;
  }

  bool isCN = false;
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN && (int32_t)CN->getSExtValue() == CN->getSExtValue()) {
    isCN = true;
    Val = CurDAG->getTargetConstant(CN->getSExtValue(), NVT);
  }

  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
    default: return 0;
    case MVT::i8:
      if (isCN)
        Opc = AtomicOpcTbl[Op][ConstantI8];
      else
        Opc = AtomicOpcTbl[Op][I8];
      break;
    case MVT::i16:
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = AtomicOpcTbl[Op][SextConstantI16];
        else
          Opc = AtomicOpcTbl[Op][ConstantI16];
      } else
        Opc = AtomicOpcTbl[Op][I16];
      break;
    case MVT::i32:
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = AtomicOpcTbl[Op][SextConstantI32];
        else
          Opc = AtomicOpcTbl[Op][ConstantI32];
      } else
        Opc = AtomicOpcTbl[Op][I32];
      break;
    case MVT::i64:
      Opc = AtomicOpcTbl[Op][I64];
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = AtomicOpcTbl[Op][SextConstantI64];
        else if (i64immSExt32(Val.getNode()))
          Opc = AtomicOpcTbl[Op][ConstantI64];
      }
      break;
  }

  assert(Opc != 0 && "Invalid arith lock transform!");

  DebugLoc dl = Node->getDebugLoc();
  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
  SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
  cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
  SDValue RetVals[] = { Undef, Ret };
  return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
}

/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
/// any uses which require the SF or OF bits to be accurate.
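/// For example, a CMP feeding only unsigned tests such as JE/JA/SETB is
/// fine, while one feeding a signed test such as SETL (which reads SF and
/// OF) makes this return false via the conservative default case below.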
static bool HasNoSignedComparisonUses(SDNode *N) {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = N->use_begin(),
         UE = N->use_end(); UI != UE; ++UI) {
    // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      return false;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
          X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the opcode of the user.
      switch (FlagUI->getMachineOpcode()) {
      // These comparisons don't treat the most significant bit specially.
      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
      case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
      case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
      case X86::CMOVA16rr: case X86::CMOVA16rm:
      case X86::CMOVA32rr: case X86::CMOVA32rm:
      case X86::CMOVA64rr: case X86::CMOVA64rm:
      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
      case X86::CMOVB16rr: case X86::CMOVB16rm:
      case X86::CMOVB32rr: case X86::CMOVB32rm:
      case X86::CMOVB64rr: case X86::CMOVB64rm:
      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
      case X86::CMOVE16rr: case X86::CMOVE16rm:
      case X86::CMOVE32rr: case X86::CMOVE32rm:
      case X86::CMOVE64rr: case X86::CMOVE64rm:
      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
      case X86::CMOVP16rr: case X86::CMOVP16rm:
      case X86::CMOVP32rr: case X86::CMOVP32rm:
      case X86::CMOVP64rr: case X86::CMOVP64rm:
        continue;
      // Anything else: assume conservatively.
      default: return false;
      }
    }
  }
  return true;
}

SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
  EVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  DebugLoc dl = Node->getDebugLoc();

  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');

  if (Node->isMachineOpcode()) {
    DEBUG(dbgs() << "== ";  Node->dump(CurDAG); dbgs() << '\n');
    return NULL;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case X86ISD::ATOMOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMOR6432);
  case X86ISD::ATOMXOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMXOR6432);
  case X86ISD::ATOMADD64_DAG:
    return SelectAtomic64(Node, X86::ATOMADD6432);
  case X86ISD::ATOMSUB64_DAG:
    return SelectAtomic64(Node, X86::ATOMSUB6432);
  case X86ISD::ATOMNAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMNAND6432);
  case X86ISD::ATOMAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMAND6432);
  case X86ISD::ATOMSWAP64_DAG:
    return SelectAtomic64(Node, X86::ATOMSWAP6432);

  case ISD::ATOMIC_LOAD_ADD: {
    SDNode *RetVal = SelectAtomicLoadAdd(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR: {
    SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    // For operations of the form (x << C1) op C2, check if we can use a smaller
    // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
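    // For example (illustrative): "(x << 8) ^ 0x1200" has Val = 0x1200 and
    // ShlVal = 8; Val >> ShlVal = 0x12 fits in an i8 immediate, so we emit
    // "xor $0x12" followed by "shl $8" instead of a 32-bit immediate xor.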
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
      break;

    // i8 is unshrinkable, i16 should be promoted to i32.
    if (NVT != MVT::i32 && NVT != MVT::i64)
      break;

    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
    ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!Cst || !ShlCst)
      break;

    int64_t Val = Cst->getSExtValue();
    uint64_t ShlVal = ShlCst->getZExtValue();

    // Make sure that we don't change the operation by removing bits.
    // This only matters for OR and XOR, AND is unaffected.
    if (Opcode != ISD::AND && ((Val >> ShlVal) << ShlVal) != Val)
      break;

    unsigned ShlOp, Op = 0;
    EVT CstVT = NVT;

    // Check the minimum bitwidth for the new constant.
    // TODO: AND32ri is the same as AND64ri32 with zext imm.
    // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
    // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
    if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
      CstVT = MVT::i8;
    else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
      CstVT = MVT::i32;

    // Bail if there is no smaller encoding.
    if (NVT == CstVT)
      break;

    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i32:
      assert(CstVT == MVT::i8);
      ShlOp = X86::SHL32ri;

      switch (Opcode) {
      case ISD::AND: Op = X86::AND32ri8; break;
      case ISD::OR:  Op =  X86::OR32ri8; break;
      case ISD::XOR: Op = X86::XOR32ri8; break;
      }
      break;
    case MVT::i64:
      assert(CstVT == MVT::i8 || CstVT == MVT::i32);
      ShlOp = X86::SHL64ri;

      switch (Opcode) {
      case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
      case ISD::OR:  Op = CstVT==MVT::i8?  X86::OR64ri8 :  X86::OR64ri32; break;
      case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
      }
      break;
    }

    // Emit the smaller op and the shift.
    SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
    SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst);
    return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
                                getI8Imm(ShlVal));
  }
  case X86ISD::UMUL: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned LoReg;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  Opc = X86::MUL8r; break;
    case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
    case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
    case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
    SDValue Ops[] = {N1, InFlag};
    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2);

    ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
    return NULL;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned LoReg, HiReg;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiplication is commutative; if we couldn't fold a load from N1,
    // try N0 and swap the operands.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                            N0, SDValue()).getValue(1);

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);

      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      SDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag);
      InFlag = SDValue(CNode, 0);
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
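    // (Encodings that use a REX prefix, e.g. those touching %sil or %r8b,
    // cannot address AH, so the high byte is recovered by copying AX and
    // shifting right by 8 instead.)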
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);
      // Get the low part if needed. Don't use getCopyFromReg for aliasing
      // registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX down 8 bits.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                     CurDAG->getTargetConstant(8, MVT::i8)), 0);
      // Then truncate it down to i8.
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the low half of the result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                                LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the high half of the result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }

    return NULL;
  }

  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

    unsigned LoReg, HiReg, ClrReg;
    unsigned ClrOpcode, SExtOpcode;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
      ClrOpcode  = 0;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrOpcode  = X86::MOV16r0; ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      ClrOpcode  = X86::MOV32r0;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      ClrOpcode  = X86::MOV64r0;
      SExtOpcode = X86::CQO;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8: just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
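      // (Illustratively, an unsigned i8 divide becomes "movzbl %src, %eax;
      // divb %reg", leaving the quotient in AL and the remainder in AH.)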
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
                                         MVT::Other, Ops,
                                         array_lengthof(Ops)), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),0);
        Chain = CurDAG->getEntryNode();
      }
      Chain  = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
      } else {
        // Zero out the high part, effectively zero-extending the input.
        SDValue ClrNode =
          SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    // Shift it down 8 bits.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);

      // If we also need AL (the quotient), get it by extracting a subreg from
      // Result. The fast register allocator does not like multiple CopyFromReg
      // nodes using aliasing registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX right by 8 bits instead of using AH.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                         Result,
                                         CurDAG->getTargetConstant(8, MVT::i8)),
                       0);
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                                LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    return NULL;
  }

  case X86ISD::CMP: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
        HasNoSignedComparisonUses(Node))
      // Look past the truncate if CMP is the only use of it.
      N0 = N0.getOperand(0);
    if ((N0.getNode()->getOpcode() == ISD::AND ||
         (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
        N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;

      // For example, convert "testl %eax, $8" to "testb %al, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
          (!(C->getZExtValue() & 0x80) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // On x86-32, only the ABCD registers have 8-bit subregisters.
        if (!Subtarget->is64Bit()) {
          TargetRegisterClass *TRC = 0;
          switch (N0.getValueType().getSimpleVT().SimpleTy) {
          case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
          case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
          SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
          Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                               Reg.getValueType(), Reg, RC), 0);
        }

        // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, Subreg, Imm);
      }

      // For example, convert "testl %eax, $2048" to "testb %ah, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        // Shift the immediate right by 8 bits.
        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
                                                       MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Put the value in an ABCD register.
        TargetRegisterClass *TRC = 0;
        switch (N0.getValueType().getSimpleVT().SimpleTy) {
        case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
        case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
        case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
        default: llvm_unreachable("Unsupported TEST operand type!");
        }
        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
        Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                             Reg.getValueType(), Reg, RC), 0);

        // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.  The EXTRACT_SUBREG becomes a COPY that can only
        // target GR8_NOREX registers, so make sure the register class is
        // forced.
        return CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl, MVT::i32,
                                      Subreg, ShiftedImm);
      }

      // For example, convert "testl %eax, $32776" to "testw %ax, $32776".
      if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
          N0.getValueType() != MVT::i16 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        return CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, Subreg, Imm);
      }

      // For example, convert "testq %rax, $268468232" to
      // "testl %eax, $268468232".
      if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
          N0.getValueType() == MVT::i64 &&
          (!(C->getZExtValue() & 0x80000000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        return CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, Subreg, Imm);
      }
    }
    break;
  }
  case ISD::STORE: {
    // The DEC64m tablegen pattern is currently not able to match the case
    // where the EFLAGS from the original DEC are used. We'll need to improve
    // tablegen to allow flags to be transferred from a node in the pattern to
    // the result node, probably with a new keyword. For example, we have this:
    // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //   (implicit EFLAGS)]>;
    // but may need something like this:
    // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //   (transferrable EFLAGS)]>;
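    // The DAG shape folded here is (store (X86ISD::DEC (load addr)), addr),
    // where the DEC consumes the load's value result and the store is
    // chained directly on the same load, so the read-modify-write collapses
    // into a single DEC64m.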
    StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
    SDValue Chain = StoreNode->getOperand(0);
    SDValue StoredVal = StoreNode->getOperand(1);
    SDValue Address = StoreNode->getOperand(2);
    SDValue Undef = StoreNode->getOperand(3);

    if (StoreNode->getMemOperand()->getSize() != 8 ||
        Undef->getOpcode() != ISD::UNDEF ||
        Chain->getOpcode() != ISD::LOAD ||
        StoredVal->getOpcode() != X86ISD::DEC ||
        StoredVal.getResNo() != 0 ||
        !StoredVal.getNode()->hasNUsesOfValue(1, 0) ||
        !Chain.getNode()->hasNUsesOfValue(1, 0) ||
        StoredVal->getOperand(0).getNode() != Chain.getNode())
      break;

    // Don't fold non-temporal stores (this mirrors the
    // Predicate_nontemporalstore check in the generated matcher).
    if (StoreNode->isNonTemporal())
      break;

    LoadSDNode *LoadNode = cast<LoadSDNode>(Chain.getNode());
    if (LoadNode->getOperand(1) != Address ||
        LoadNode->getOperand(2) != Undef)
      break;

    if (!ISD::isNormalLoad(LoadNode))
      break;

    if (!ISD::isNormalStore(StoreNode))
      break;

    // Check that the load's chain result has only one use (the store).
    if (!Chain.hasOneUse())
      break;

    // Merge the input chains if they are not intra-pattern references.
    SDValue InputChain = LoadNode->getOperand(0);

    SDValue Base, Scale, Index, Disp, Segment;
    if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
                    Base, Scale, Index, Disp, Segment))
      break;

    MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
    MemOp[0] = StoreNode->getMemOperand();
    MemOp[1] = LoadNode->getMemOperand();
    const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
    MachineSDNode *Result = CurDAG->getMachineNode(X86::DEC64m,
                                                   Node->getDebugLoc(),
                                                   MVT::i32, MVT::Other, Ops,
                                                   array_lengthof(Ops));
    Result->setMemRefs(MemOp, MemOp + 2);

    ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
    ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));

    return Result;
  }
  }

  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == NULL || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << '\n');

  return ResNode;
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

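  // The five operands pushed below form the standard X86 memory reference:
  // base, scale, index, displacement, and segment.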
  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// createX86ISelDag - This pass converts a legalized DAG into an
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     llvm::CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}