X86ISelDAGToDAG.cpp revision 2a01946de4e510e42691f8dc5e7331fcecb67432
//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized DAG to an X86 DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValues instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    int JT;
    unsigned Align;    // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
        SymbolFlags(X86II::MO_NO_FLAG) {
    }

    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
    }

    bool hasBaseOrIndexReg() const {
      return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
    }

    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode() != 0)
        Base_Reg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Base_FrameIndex " << Base_FrameIndex << '\n'
             << " Scale " << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode() != 0)
        IndexReg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " JT " << JT << " Align " << Align << '\n';
    }
  };
}
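
// Worked example (illustrative, not part of the original source): the x86
// address [%rbx + %rcx*4 + 16] corresponds to an X86ISelAddressMode with
//   BaseType = RegBase, Base_Reg = %rbx, Scale = 4, IndexReg = %rcx,
//   Disp = 16,
// and all symbolic fields (GV, CP, ES, JT, BlockAddr) empty.  A stack slot
// address instead uses BaseType = FrameIndexBase with Base_FrameIndex set
// and Base_Reg unused.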

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel : public SelectionDAGISel {
    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    const X86TargetLowering &X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        X86Lowering(*tm.getTargetLowering()),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    virtual void EmitFunctionEntryCode();

    virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;

    virtual void PreprocessISelDAG();

    inline bool immSext8(SDNode *N) const {
      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    }

    // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
    // sign extended field.
    inline bool i64immSExt32(SDNode *N) const {
      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
      return (int64_t)v == (int32_t)v;
    }
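
    // For illustration: v = 0xFFFFFFFF80000000 passes this check, since
    // (int32_t)v is INT32_MIN and sign-extending it restores v, while
    // v = 0x0000000080000000 fails: (int32_t)v is negative but (int64_t)v
    // is positive.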

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    SDNode *Select(SDNode *N);
    SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
    SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT);
    SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT);

    bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool SelectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool SelectScalarSSELoad(SDNode *Root, SDValue N,
                             SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &NodeWithChain);

    bool TryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                              char ConstraintCode,
                                              std::vector<SDValue> &OutOps);

    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);

    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                   SDValue &Scale, SDValue &Index,
                                   SDValue &Disp, SDValue &Segment) {
      Base  = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
        CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) :
        AM.Base_Reg;
      Scale = getI8Imm(AM.Scale);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since the RIP-relative offset
      // is 32 bits.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES)
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      else if (AM.JT != -1)
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      else if (AM.BlockAddr)
        Disp = CurDAG->getBlockAddress(AM.BlockAddr, MVT::i32,
                                       true, AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }

    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() {
      return getTargetMachine().getInstrInfo();
    }
  };
}


bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::SUB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl    %gs:0, %eax
      // leal    i@NTPOFF(%eax), %eax
      // instead of
      // movl    $i@NTPOFF, %eax
      // addl    %gs:0, %eax
      // If the block also has an access to a second TLS address, this will
      // save a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}

/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// the load's chain operand and move the load below the call's chain operand.
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    Ops.push_back(OrigChain.getOperand(i));
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], Ops.size());
}

/// isCalleeLoad - Return true if the call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize);

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        (N->getOpcode() == X86ISD::CALL ||
         N->getOpcode() == X86ISD::TC_RETURN)) {
      /// Try moving the call address load from outside the callseq_start to
      /// just before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to a store
    // and a load to the stack.  This is a gross hack.  We would like to
    // simply mark these as being illegal, but when we do that, legalize
    // produces these when it expands calls, and then expands these in the
    // same legalize pass.  We would like dag combine to be able to hack on
    // these between the call expansion and the node legalization.  As such,
    // this pass basically does "really late" legalization of these inline
    // with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    EVT SrcVT = N->getOperand(0).getValueType();
    EVT DstVT = N->getValueType(0);
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore.  SSE can fold direct loads into other
    // operations.  Based on this, decide what we want to do.
    EVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    DebugLoc dl = N->getDebugLoc();

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, MachinePointerInfo(), MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(),
                                        MemVT, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created.  This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
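
// For illustration, an f64-to-f32 FP_ROUND between two FP-stack values is
// rewritten by the loop above (schematically, types assumed) as:
//   ch  = truncstore<f32> entry, X, MemTmp   ; rounding happens in the store
//   f32 = extload<f32> ch, MemTmp            ; reload the rounded value
// so the conversion is forced through a stack temporary.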


/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing()) {
    unsigned CallOp =
      Subtarget->is64Bit() ? X86::WINCALL64pcrel32 : X86::CALLpcrel32;
    BuildMI(BB, DebugLoc(),
            TII->get(CallOp)).addExternalSymbol("__main");
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}
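
// For illustration: isInt<31> accepts values in [-2^30, 2^30-1], so a
// combined displacement of 0x3FFFFFFF is considered safe here while
// 0x40000000 is rejected, keeping headroom below the hardware's 32-bit
// signed displacement field.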

bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}

bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM) {
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
        Subtarget->isTargetELF())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      }

  return true;
}

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode.  These wrap things that will resolve down into a
/// symbol reference.  If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses.  We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() &&
      // Under the X86-64 non-small code models, GV (and friends) are 64 bits,
      // so they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel) &&
      // Base and index reg must be 0 in order to use %rip as base and lowering
      // must allow RIP.
      !AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -static -mcmodel=small mode.  In 64-bit
  // mode, this results in a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      ((M == CodeModel::Small || M == CodeModel::Kernel) &&
       TM.getRelocationModel() == Reloc::Static)) {
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done.  This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0 &&
      AM.IndexReg.getNode() == 0 &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}
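
// For illustration: matching (shl %reg, 1) initially yields Scale = 2 with
// no base register, i.e. lea (,%reg,2); the first post-processing step
// above turns that into lea (%reg,%reg), whose encoding does not need the
// mandatory 32-bit zero displacement of a base-less SIB address.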

bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  DebugLoc dl = N.getDebugLoc();
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it.  Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements.  It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!FoldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now.  However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = AddVal->getSExtValue() << Val;
          if (!FoldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now.  However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (FoldOffsetIntoAddress(Disp, AM))
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;
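
  // For illustration: with CN == 9 the code above sets Scale = 8 and
  // Base = Index = X, so X*9 can later be selected as a single
  // "lea (%rax,%rax,8), %rcx" (registers arbitrary) instead of a multiply.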

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address and the
    // index field is left unused, use -B as the index. This is a win if A
    // has multiple parts that can be folded into the address. Also, this
    // saves a mov if the base register has other uses, since it avoids a
    // two-address sub instruction; however, it costs an additional mov if
    // the index register has other uses.

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    if (Zero.getNode()->getNodeId() == -1 ||
        Zero.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), Zero.getNode());
      Zero.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    if (Neg.getNode()->getNodeId() == -1 ||
        Neg.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), Neg.getNode());
      Neg.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    return false;
  }

  case ISD::ADD: {
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;

    // Try again after commuting the operands.
    if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base_Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      N = Handle.getValue();
      AM.Base_Reg = N.getOperand(0);
      AM.IndexReg = N.getOperand(1);
      AM.Scale = 1;
      return false;
    }
    N = Handle.getValue();
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (CurDAG->isBaseWithConstantOffset(N)) {
      X86ISelAddressMode Backup = AM;
      ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));

      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
          !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
        return false;
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    SDValue Shift = N.getOperand(0);
    if (Shift.getNumOperands() != 2) break;

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue X = Shift.getOperand(0);
    ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
    if (!C1 || !C2) break;

    // Handle "(X >> (8-C1)) & C2" as "(X >> 8) & 0xff" if safe. This
    // allows us to convert the shift and mask into an h-register extract and
    // a scaled index.
    if (Shift.getOpcode() == ISD::SRL && Shift.hasOneUse()) {
      unsigned ScaleLog = 8 - C1->getZExtValue();
      if (ScaleLog > 0 && ScaleLog < 4 &&
          C2->getZExtValue() == (UINT64_C(0xff) << ScaleLog)) {
        SDValue Eight = CurDAG->getConstant(8, MVT::i8);
        SDValue Mask = CurDAG->getConstant(0xff, N.getValueType());
        SDValue Srl = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
                                      X, Eight);
        SDValue And = CurDAG->getNode(ISD::AND, dl, N.getValueType(),
                                      Srl, Mask);
        SDValue ShlCount = CurDAG->getConstant(ScaleLog, MVT::i8);
        SDValue Shl = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
                                      And, ShlCount);

        // Insert the new nodes into the topological ordering.
        if (Eight.getNode()->getNodeId() == -1 ||
            Eight.getNode()->getNodeId() > X.getNode()->getNodeId()) {
          CurDAG->RepositionNode(X.getNode(), Eight.getNode());
          Eight.getNode()->setNodeId(X.getNode()->getNodeId());
        }
        if (Mask.getNode()->getNodeId() == -1 ||
            Mask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
          CurDAG->RepositionNode(X.getNode(), Mask.getNode());
          Mask.getNode()->setNodeId(X.getNode()->getNodeId());
        }
        if (Srl.getNode()->getNodeId() == -1 ||
            Srl.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
          CurDAG->RepositionNode(Shift.getNode(), Srl.getNode());
          Srl.getNode()->setNodeId(Shift.getNode()->getNodeId());
        }
        if (And.getNode()->getNodeId() == -1 ||
            And.getNode()->getNodeId() > N.getNode()->getNodeId()) {
          CurDAG->RepositionNode(N.getNode(), And.getNode());
          And.getNode()->setNodeId(N.getNode()->getNodeId());
        }
        if (ShlCount.getNode()->getNodeId() == -1 ||
            ShlCount.getNode()->getNodeId() > X.getNode()->getNodeId()) {
          CurDAG->RepositionNode(X.getNode(), ShlCount.getNode());
          ShlCount.getNode()->setNodeId(X.getNode()->getNodeId());
        }
        if (Shl.getNode()->getNodeId() == -1 ||
            Shl.getNode()->getNodeId() > N.getNode()->getNodeId()) {
          CurDAG->RepositionNode(N.getNode(), Shl.getNode());
          Shl.getNode()->setNodeId(N.getNode()->getNodeId());
        }
        CurDAG->ReplaceAllUsesWith(N, Shl);
        AM.IndexReg = And;
        AM.Scale = (1 << ScaleLog);
        return false;
      }
    }

    // Handle "(X << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
    // allows us to fold the shift into this addressing mode.
    if (Shift.getOpcode() != ISD::SHL) break;

    // Not likely to be profitable if either the AND or SHIFT node has more
    // than one use (unless all uses are for address computation). Besides,
    // the isel mechanism requires their node ids to be reused.
    if (!N.hasOneUse() || !Shift.hasOneUse())
      break;

    // Verify that the shift amount is something we can fold.
    unsigned ShiftCst = C1->getZExtValue();
    if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
      break;

    // Get the new AND mask, this folds to a constant.
    SDValue NewANDMask = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
                                         SDValue(C2, 0), SDValue(C1, 0));
    SDValue NewAND = CurDAG->getNode(ISD::AND, dl, N.getValueType(), X,
                                     NewANDMask);
    SDValue NewSHIFT = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
                                       NewAND, SDValue(C1, 0));

    // Insert the new nodes into the topological ordering.
    if (C1->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), C1);
      C1->setNodeId(X.getNode()->getNodeId());
    }
    if (NewANDMask.getNode()->getNodeId() == -1 ||
        NewANDMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), NewANDMask.getNode());
      NewANDMask.getNode()->setNodeId(X.getNode()->getNodeId());
    }
    if (NewAND.getNode()->getNodeId() == -1 ||
        NewAND.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
      CurDAG->RepositionNode(Shift.getNode(), NewAND.getNode());
      NewAND.getNode()->setNodeId(Shift.getNode()->getNodeId());
    }
    if (NewSHIFT.getNode()->getNodeId() == -1 ||
        NewSHIFT.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), NewSHIFT.getNode());
      NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
    }

    CurDAG->ReplaceAllUsesWith(N, NewSHIFT);

    AM.Scale = 1 << ShiftCst;
    AM.IndexReg = NewAND;
    return false;
  }
  }

  return MatchAddressBase(N, AM);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}

/// SelectAddr - Returns true if it is able to pattern match an addressing
/// mode. It returns by reference the operands which make up the maximal
/// addressing mode it can match.
///
/// Parent is the parent node of the addr operand that is being matched.  It
/// is always a load, store, atomic node, or null.  It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // These opcodes are all the nodes that have an "addr:$ptr" operand
      // that are not a MemSDNode, and thus don't have proper addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL) { // Fixme
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS.
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  }

  if (MatchAddress(N, AM))
    return false;

  EVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectScalarSSELoad - Match a scalar SSE load.  In particular, we want to
/// match a load whose top elements are either undef or zeros.  The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternNodeWithChain: this is the matched node that has a chain input and
///   output.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        PatternNodeWithChain.hasOneUse() &&
        IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
        IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements.  This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse() &&
      IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
      IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
    // Okay, this is a zero extending load.  Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    PatternNodeWithChain = SDValue(LD, 0);
    return true;
  }
  return false;
}
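
// For illustration: a pattern such as (v4f32 (scalar_to_vector (loadf32 P)))
// matched here lets the memory operand fold directly into an SSE
// instruction, e.g.
//   addss 16(%rsp), %xmm0
// instead of a separate movss load followed by a register-register addss.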


/// SelectLEAAddr - Match an addressing mode and determine whether the maximal
/// addressing mode it matches can be cost-effectively emitted as an LEA
/// instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert(T == AM.Segment);
  AM.Segment = Copy;

  EVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base_Reg.getNode())
      Complexity = 1;
    else
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg or a
  // simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // into a LEA. This is determined with some experimentation but is by no
  // means optimal (especially for code size considerations). LEA is nice
  // because of its three-address nature. Tweak the cost function again when
  // we can run convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
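
// For illustration of the cost model above: [%rbx + 16] scores Complexity 2
// (base register + displacement) and is rejected, since an add-immediate is
// cheaper; [%rbx + %rcx*4 + 16] scores 4 (base, index, scale > 1,
// displacement) and is emitted as lea 16(%rbx,%rcx,4), %rdx (registers
// arbitrary).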

/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}


bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P, OptLevel))
    return false;

  return SelectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}

SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return NULL;
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain };
  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
                                           MVT::i32, MVT::i32, MVT::Other, Ops,
                                           array_lengthof(Ops));
  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
  return ResNode;
}

// FIXME: Figure out some way to unify this with the 'or' and other code
// below.
SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  // Optimize common patterns for __sync_add_and_fetch and
  // __sync_sub_and_fetch where the result is not used. This allows us
  // to use the "lock" versions of the add, sub, inc, and dec instructions.
  // FIXME: Do not use special instructions but instead add the "lock"
  // prefix to the target node somehow. The extra information will then be
  // transferred to the machine instruction and it denotes the prefix.
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  bool isInc = false, isDec = false, isSub = false, isCN = false;
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN && CN->getSExtValue() == (int32_t)CN->getSExtValue()) {
    isCN = true;
    int64_t CNVal = CN->getSExtValue();
    if (CNVal == 1)
      isInc = true;
    else if (CNVal == -1)
      isDec = true;
    else if (CNVal >= 0)
      Val = CurDAG->getTargetConstant(CNVal, NVT);
    else {
      isSub = true;
      Val = CurDAG->getTargetConstant(-CNVal, NVT);
    }
  } else if (Val.hasOneUse() &&
             Val.getOpcode() == ISD::SUB &&
             X86::isZeroNode(Val.getOperand(0))) {
    isSub = true;
    Val = Val.getOperand(1);
  }

  DebugLoc dl = Node->getDebugLoc();
  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:
    if (isInc)
      Opc = X86::LOCK_INC8m;
    else if (isDec)
      Opc = X86::LOCK_DEC8m;
    else if (isSub) {
      if (isCN)
        Opc = X86::LOCK_SUB8mi;
      else
        Opc = X86::LOCK_SUB8mr;
    } else {
      if (isCN)
        Opc = X86::LOCK_ADD8mi;
      else
        Opc = X86::LOCK_ADD8mr;
    }
    break;
  case MVT::i16:
    if (isInc)
      Opc = X86::LOCK_INC16m;
    else if (isDec)
      Opc = X86::LOCK_DEC16m;
    else if (isSub) {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB16mi8;
        else
          Opc = X86::LOCK_SUB16mi;
      } else
        Opc = X86::LOCK_SUB16mr;
    } else {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD16mi8;
        else
          Opc = X86::LOCK_ADD16mi;
      } else
        Opc = X86::LOCK_ADD16mr;
    }
    break;
  case MVT::i32:
    if (isInc)
      Opc = X86::LOCK_INC32m;
    else if (isDec)
      Opc = X86::LOCK_DEC32m;
    else if (isSub) {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB32mi8;
        else
          Opc = X86::LOCK_SUB32mi;
      } else
        Opc = X86::LOCK_SUB32mr;
    } else {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD32mi8;
        else
          Opc = X86::LOCK_ADD32mi;
      } else
        Opc = X86::LOCK_ADD32mr;
    }
    break;
  case MVT::i64:
    if (isInc)
      Opc = X86::LOCK_INC64m;
    else if (isDec)
      Opc = X86::LOCK_DEC64m;
    else if (isSub) {
      Opc = X86::LOCK_SUB64mr;
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB64mi8;
        else if (i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_SUB64mi32;
      }
    } else {
      Opc = X86::LOCK_ADD64mr;
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD64mi8;
        else if (i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_ADD64mi32;
      }
    }
    break;
  }
1471
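  // The arithmetic result is known dead (checked on entry), so satisfy
  // result 0 with an IMPLICIT_DEF and return only a real chain result.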
  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  if (isInc || isDec) {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 6), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  } else {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  }
}

enum AtomicOpc {
  OR,
  AND,
  XOR,
  AtomicOpcEnd
};

enum AtomicSz {
  ConstantI8,
  I8,
  SextConstantI16,
  ConstantI16,
  I16,
  SextConstantI32,
  ConstantI32,
  I32,
  SextConstantI64,
  ConstantI64,
  I64,
  AtomicSzEnd
};

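// AtomicOpcTbl - Locked-opcode lookup table, indexed first by AtomicOpc (the
// operation) and then by AtomicSz (operand size and immediate form). The row
// order must match AtomicOpc and the column order must match AtomicSz; for
// example, AtomicOpcTbl[XOR][ConstantI32] is X86::LOCK_XOR32mi.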
static const unsigned int AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
  {
    X86::LOCK_OR8mi,
    X86::LOCK_OR8mr,
    X86::LOCK_OR16mi8,
    X86::LOCK_OR16mi,
    X86::LOCK_OR16mr,
    X86::LOCK_OR32mi8,
    X86::LOCK_OR32mi,
    X86::LOCK_OR32mr,
    X86::LOCK_OR64mi8,
    X86::LOCK_OR64mi32,
    X86::LOCK_OR64mr
  },
  {
    X86::LOCK_AND8mi,
    X86::LOCK_AND8mr,
    X86::LOCK_AND16mi8,
    X86::LOCK_AND16mi,
    X86::LOCK_AND16mr,
    X86::LOCK_AND32mi8,
    X86::LOCK_AND32mi,
    X86::LOCK_AND32mr,
    X86::LOCK_AND64mi8,
    X86::LOCK_AND64mi32,
    X86::LOCK_AND64mr
  },
  {
    X86::LOCK_XOR8mi,
    X86::LOCK_XOR8mr,
    X86::LOCK_XOR16mi8,
    X86::LOCK_XOR16mi,
    X86::LOCK_XOR16mr,
    X86::LOCK_XOR32mi8,
    X86::LOCK_XOR32mi,
    X86::LOCK_XOR32mr,
    X86::LOCK_XOR64mi8,
    X86::LOCK_XOR64mi32,
    X86::LOCK_XOR64mr
  }
};

SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  // Optimize common patterns for __sync_or_and_fetch and similar arith
  // operations where the result is not used. This allows us to use the
  // "lock" version of the arithmetic instruction.
  // FIXME: Try to merge the 'add' and 'sub' handling above down into this
  // table-driven code.
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  // Determine which table row the operation selects.
  enum AtomicOpc Op;
  switch (Node->getOpcode()) {
    case ISD::ATOMIC_LOAD_OR:
      Op = OR;
      break;
    case ISD::ATOMIC_LOAD_AND:
      Op = AND;
      break;
    case ISD::ATOMIC_LOAD_XOR:
      Op = XOR;
      break;
    default:
      return 0;
  }

  bool isCN = false;
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN && (int32_t)CN->getSExtValue() == CN->getSExtValue()) {
    isCN = true;
    Val = CurDAG->getTargetConstant(CN->getSExtValue(), NVT);
  }

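  // Pick the locked opcode: the same size and immediate-form decisions as in
  // SelectAtomicLoadAdd, but looked up through AtomicOpcTbl.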
  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
    default: return 0;
    case MVT::i8:
      if (isCN)
        Opc = AtomicOpcTbl[Op][ConstantI8];
      else
        Opc = AtomicOpcTbl[Op][I8];
      break;
    case MVT::i16:
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = AtomicOpcTbl[Op][SextConstantI16];
        else
          Opc = AtomicOpcTbl[Op][ConstantI16];
      } else
        Opc = AtomicOpcTbl[Op][I16];
      break;
    case MVT::i32:
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = AtomicOpcTbl[Op][SextConstantI32];
        else
          Opc = AtomicOpcTbl[Op][ConstantI32];
      } else
        Opc = AtomicOpcTbl[Op][I32];
      break;
    case MVT::i64:
      Opc = AtomicOpcTbl[Op][I64];
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = AtomicOpcTbl[Op][SextConstantI64];
        else if (i64immSExt32(Val.getNode()))
          Opc = AtomicOpcTbl[Op][ConstantI64];
      }
      break;
  }

  assert(Opc != 0 && "Invalid arith lock transform!");

  DebugLoc dl = Node->getDebugLoc();
  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
  SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
  cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
  SDValue RetVals[] = { Undef, Ret };
  return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
}

/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
/// any uses which require the SF or OF bits to be accurate.
static bool HasNoSignedComparisonUses(SDNode *N) {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = N->use_begin(),
         UE = N->use_end(); UI != UE; ++UI) {
    // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      return false;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
          X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the opcode of the user.
      switch (FlagUI->getMachineOpcode()) {
      // These comparisons don't treat the most significant bit specially.
      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
      case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
      case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
      case X86::CMOVA16rr: case X86::CMOVA16rm:
      case X86::CMOVA32rr: case X86::CMOVA32rm:
      case X86::CMOVA64rr: case X86::CMOVA64rm:
      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
      case X86::CMOVB16rr: case X86::CMOVB16rm:
      case X86::CMOVB32rr: case X86::CMOVB32rm:
      case X86::CMOVB64rr: case X86::CMOVB64rm:
      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
      case X86::CMOVE16rr: case X86::CMOVE16rm:
      case X86::CMOVE32rr: case X86::CMOVE32rm:
      case X86::CMOVE64rr: case X86::CMOVE64rm:
      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
      case X86::CMOVP16rr: case X86::CMOVP16rm:
      case X86::CMOVP32rr: case X86::CMOVP32rm:
      case X86::CMOVP64rr: case X86::CMOVP64rm:
        continue;
      // Anything else: assume conservatively.
      default: return false;
      }
    }
  }
  return true;
}

SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
  EVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  DebugLoc dl = Node->getDebugLoc();

  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');

  if (Node->isMachineOpcode()) {
    DEBUG(dbgs() << "== ";  Node->dump(CurDAG); dbgs() << '\n');
    return NULL;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case X86ISD::ATOMOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMOR6432);
  case X86ISD::ATOMXOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMXOR6432);
  case X86ISD::ATOMADD64_DAG:
    return SelectAtomic64(Node, X86::ATOMADD6432);
  case X86ISD::ATOMSUB64_DAG:
    return SelectAtomic64(Node, X86::ATOMSUB6432);
  case X86ISD::ATOMNAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMNAND6432);
  case X86ISD::ATOMAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMAND6432);
  case X86ISD::ATOMSWAP64_DAG:
    return SelectAtomic64(Node, X86::ATOMSWAP6432);

  case ISD::ATOMIC_LOAD_ADD: {
    SDNode *RetVal = SelectAtomicLoadAdd(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR: {
    SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    // For operations of the form (x << C1) op C2, check if we can use a smaller
    // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
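    // For example, (x << 8) | 0x1200 needs a 32-bit immediate, but the
    // equivalent ((x | 0x12) << 8) fits its constant in 8 bits.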
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
      break;

    // i8 is unshrinkable, i16 should be promoted to i32.
    if (NVT != MVT::i32 && NVT != MVT::i64)
      break;

    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
    ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!Cst || !ShlCst)
      break;

    int64_t Val = Cst->getSExtValue();
    uint64_t ShlVal = ShlCst->getZExtValue();

    // Make sure that we don't change the operation by removing bits.
    // This only matters for OR and XOR, AND is unaffected.
    if (Opcode != ISD::AND && ((Val >> ShlVal) << ShlVal) != Val)
      break;

    unsigned ShlOp, Op = 0;
    EVT CstVT = NVT;

    // Check the minimum bitwidth for the new constant.
    // TODO: AND32ri is the same as AND64ri32 with zext imm.
    // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
    // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
    if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
      CstVT = MVT::i8;
    else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
      CstVT = MVT::i32;

    // Bail if there is no smaller encoding.
    if (NVT == CstVT)
      break;

    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i32:
      assert(CstVT == MVT::i8);
      ShlOp = X86::SHL32ri;

      switch (Opcode) {
      case ISD::AND: Op = X86::AND32ri8; break;
      case ISD::OR:  Op =  X86::OR32ri8; break;
      case ISD::XOR: Op = X86::XOR32ri8; break;
      }
      break;
    case MVT::i64:
      assert(CstVT == MVT::i8 || CstVT == MVT::i32);
      ShlOp = X86::SHL64ri;

      switch (Opcode) {
      case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
      case ISD::OR:  Op = CstVT==MVT::i8?  X86::OR64ri8 :  X86::OR64ri32; break;
      case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
      }
      break;
    }

    // Emit the smaller op and the shift.
    SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
    SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst);
    return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
                                getI8Imm(ShlVal));
  }
  case X86ISD::UMUL: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

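    // One-operand MUL takes one factor implicitly in the accumulator
    // (AL/AX/EAX/RAX), so N0 is copied there first and glued to the multiply;
    // the node's results are then rewired one-for-one onto the machine node.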
    unsigned LoReg;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  Opc = X86::MUL8r; break;
    case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
    case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
    case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
    SDValue Ops[] = {N1, InFlag};
    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2);

    ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
    return NULL;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned LoReg, HiReg;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }

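    // Prefer the memory form: try to fold a load of N1 into the multiply,
    // then of N0, swapping the operands when the second attempt succeeds.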
    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                            N0, SDValue()).getValue(1);

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);

      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      SDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag);
      InFlag = SDValue(CNode, 0);
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);
      // Get the low part if needed. Don't use getCopyFromReg for aliasing
      // registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX down 8 bits.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                     CurDAG->getTargetConstant(8, MVT::i8)), 0);
      // Then truncate it down to i8.
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the low half of the result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                                LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the high half of the result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }

    return NULL;
  }

  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

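    // The dividend is taken in the <HiReg,LoReg> pair (AH:AL, DX:AX, EDX:EAX
    // or RDX:RAX); before dividing, the high half must either be
    // sign-extended from the low half (CBW/CWD/CDQ/CQO) or zeroed for an
    // unsigned divide.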
    unsigned LoReg, HiReg, ClrReg;
    unsigned ClrOpcode, SExtOpcode;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
      ClrOpcode  = 0;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrOpcode  = X86::MOV16r0; ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      ClrOpcode  = X86::MOV32r0;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      ClrOpcode  = X86::MOV64r0;
      SExtOpcode = X86::CQO;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8: just use a zero-extending move into EAX to
      // clear the upper bits (including AH) of the dividend.
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
                                         MVT::Other, Ops,
                                         array_lengthof(Ops)), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),0);
        Chain = CurDAG->getEntryNode();
      }
      Chain  = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode =
          SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    // Shift it down 8 bits.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);

      // If we also need AL (the quotient), get it by extracting a subreg from
      // Result. The fast register allocator does not like multiple CopyFromReg
      // nodes using aliasing registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX right by 8 bits instead of using AH.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                         Result,
                                         CurDAG->getTargetConstant(8, MVT::i8)),
                       0);
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                                LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    return NULL;
  }

  case X86ISD::CMP: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
        HasNoSignedComparisonUses(Node))
      // Look past the truncate if CMP is the only use of it.
      N0 = N0.getOperand(0);
    if (N0.getNode()->getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;
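      // Each rewrite below tests only the byte or word the immediate
      // actually populates, so ZF is unaffected; SF is computed on the
      // narrower result, which is why HasNoSignedComparisonUses is required
      // whenever the retained immediate's top bit is set.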

      // For example, convert "testl %eax, $8" to "testb %al, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
          (!(C->getZExtValue() & 0x80) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // On x86-32, only the ABCD registers have 8-bit subregisters.
        if (!Subtarget->is64Bit()) {
          TargetRegisterClass *TRC = 0;
          switch (N0.getValueType().getSimpleVT().SimpleTy) {
          case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
          case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
          SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
          Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                               Reg.getValueType(), Reg, RC), 0);
        }

        // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, Subreg, Imm);
      }

      // For example, convert "testl %eax, $2048" to "testb %ah, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        // Shift the immediate right by 8 bits.
        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
                                                       MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Put the value in an ABCD register.
        TargetRegisterClass *TRC = 0;
        switch (N0.getValueType().getSimpleVT().SimpleTy) {
        case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
        case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
        case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
        default: llvm_unreachable("Unsupported TEST operand type!");
        }
        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
        Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                             Reg.getValueType(), Reg, RC), 0);

        // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
                                                        MVT::i8, Reg);

        // Emit a testb. No special NOREX tricks are needed since there's
        // only one GPR operand!
        return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
                                      Subreg, ShiftedImm);
      }

      // For example, convert "testl %eax, $32776" to "testw %ax, $32776".
      if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
          N0.getValueType() != MVT::i16 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        return CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, Subreg, Imm);
      }

      // For example, convert "testq %rax, $268468232" to
      // "testl %eax, $268468232".
      if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
          N0.getValueType() == MVT::i64 &&
          (!(C->getZExtValue() & 0x80000000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        return CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, Subreg, Imm);
      }
    }
    break;
  }
  }

  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == NULL || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << '\n');

  return ResNode;
}

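/// SelectInlineAsmMemoryOperand - Select the five x86 memory-reference
/// operands (base, scale, index, displacement, segment) for an inline-asm
/// memory constraint, returning false on success per the SelectionDAGISel
/// convention.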
bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// createX86ISelDag - This pass converts a legalized DAG into a
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     llvm::CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}
