X86ISelLowering.cpp revision 112dedc520c1aec387a6fef1c8f512a7d27f0570
1//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file was developed by Chris Lattner and is distributed under
6// the University of Illinois Open Source License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the interfaces that X86 uses to lower LLVM code into a
11// selection DAG.
12//
13//===----------------------------------------------------------------------===//
14
15#include "X86.h"
16#include "X86InstrBuilder.h"
17#include "X86ISelLowering.h"
18#include "X86MachineFunctionInfo.h"
19#include "X86TargetMachine.h"
20#include "llvm/CallingConv.h"
21#include "llvm/Constants.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/GlobalVariable.h"
24#include "llvm/Function.h"
25#include "llvm/Intrinsics.h"
26#include "llvm/ADT/BitVector.h"
27#include "llvm/ADT/VectorExtras.h"
28#include "llvm/Analysis/ScalarEvolutionExpressions.h"
29#include "llvm/CodeGen/CallingConvLower.h"
30#include "llvm/CodeGen/MachineFrameInfo.h"
31#include "llvm/CodeGen/MachineFunction.h"
32#include "llvm/CodeGen/MachineInstrBuilder.h"
33#include "llvm/CodeGen/SelectionDAG.h"
34#include "llvm/CodeGen/SSARegMap.h"
35#include "llvm/Support/MathExtras.h"
36#include "llvm/Support/Debug.h"
37#include "llvm/Target/TargetOptions.h"
38#include "llvm/ADT/SmallSet.h"
39#include "llvm/ADT/StringExtras.h"
40#include "llvm/ParameterAttributes.h"
41using namespace llvm;
42
43X86TargetLowering::X86TargetLowering(TargetMachine &TM)
44  : TargetLowering(TM) {
45  Subtarget = &TM.getSubtarget<X86Subtarget>();
46  X86ScalarSSEf64 = Subtarget->hasSSE2();
47  X86ScalarSSEf32 = Subtarget->hasSSE1();
48  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
49
50
51  RegInfo = TM.getRegisterInfo();
52
53  // Set up the TargetLowering object.
54
55  // X86 is weird: it always uses i8 for shift amounts and setcc results.
56  setShiftAmountType(MVT::i8);
57  setSetCCResultType(MVT::i8);
58  setSetCCResultContents(ZeroOrOneSetCCResult);
59  setSchedulingPreference(SchedulingForRegPressure);
60  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
61  setStackPointerRegisterToSaveRestore(X86StackPtr);
62
63  if (Subtarget->isTargetDarwin()) {
64    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
65    setUseUnderscoreSetJmp(false);
66    setUseUnderscoreLongJmp(false);
67  } else if (Subtarget->isTargetMingw()) {
68    // MS runtime is weird: it exports _setjmp, but longjmp!
69    setUseUnderscoreSetJmp(true);
70    setUseUnderscoreLongJmp(false);
71  } else {
72    setUseUnderscoreSetJmp(true);
73    setUseUnderscoreLongJmp(true);
74  }
75
76  // Set up the register classes.
77  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
78  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
79  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
80  if (Subtarget->is64Bit())
81    addRegisterClass(MVT::i64, X86::GR64RegisterClass);
82
83  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
84
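  // Note on the actions used throughout this constructor: Legal means the
  // node is selected directly for that type, Promote means it is performed in
  // a wider (or otherwise mapped-to) type, Expand lets the legalizer break it
  // into other nodes or a libcall, and Custom routes it through this target's
  // LowerOperation hook.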
85  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
86  // operation.
87  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
88  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
89  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);
90
91  if (Subtarget->is64Bit()) {
92    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Expand);
93    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);
94  } else {
95    if (X86ScalarSSEf64)
96      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
97      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Expand);
98    else
99      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Promote);
100  }
101
102  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
103  // this operation.
104  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
105  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);
106  // SSE has no i16 to fp conversion, only i32
107  if (X86ScalarSSEf32) {
108    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
109    // f32 and f64 cases are Legal, f80 case is not
110    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
111  } else {
112    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
113    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
114  }
115
116  // In 32-bit mode these are custom lowered.  In 64-bit mode f32 and f64
117  // are Legal, f80 is custom lowered.
118  setOperationAction(ISD::FP_TO_SINT     , MVT::i64  , Custom);
119  setOperationAction(ISD::SINT_TO_FP     , MVT::i64  , Custom);
120
121  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
122  // this operation.
123  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
124  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);
125
126  if (X86ScalarSSEf32) {
127    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
128    // f32 and f64 cases are Legal, f80 case is not
129    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
130  } else {
131    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
132    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
133  }
134
135  // Handle FP_TO_UINT by promoting the destination to a larger signed
136  // conversion.
137  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
138  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
139  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);
140
141  if (Subtarget->is64Bit()) {
142    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Expand);
143    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
144  } else {
145    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
146      // Expand FP_TO_UINT into a select.
147      // FIXME: We would like to use a Custom expander here eventually to do
148      // the optimal thing for SSE vs. the default expansion in the legalizer.
149      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Expand);
150    else
151      // With SSE3 we can use fisttpll to convert to a signed i64.
152      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Promote);
153  }
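  // As a rough illustration: on x86-64 an f64 -> u32 'fptoui' ends up being
  // performed as a 64-bit signed conversion (cvttsd2si into a 64-bit register)
  // whose low 32 bits are then used.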
154
155  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
156  if (!X86ScalarSSEf64) {
157    setOperationAction(ISD::BIT_CONVERT      , MVT::f32  , Expand);
158    setOperationAction(ISD::BIT_CONVERT      , MVT::i32  , Expand);
159  }
160
161  // Scalar integer multiply, multiply-high, divide, and remainder are
162  // lowered to use operations that produce two results, to match the
163  // available instructions. This exposes the two-result form to trivial
164  // CSE, which is able to combine x/y and x%y into a single instruction,
165  // for example. The single-result multiply instructions are introduced
166  // in X86ISelDAGToDAG.cpp, after CSE, for uses where the high part
167  // is not needed.
168  setOperationAction(ISD::MUL             , MVT::i8    , Expand);
169  setOperationAction(ISD::MULHS           , MVT::i8    , Expand);
170  setOperationAction(ISD::MULHU           , MVT::i8    , Expand);
171  setOperationAction(ISD::SDIV            , MVT::i8    , Expand);
172  setOperationAction(ISD::UDIV            , MVT::i8    , Expand);
173  setOperationAction(ISD::SREM            , MVT::i8    , Expand);
174  setOperationAction(ISD::UREM            , MVT::i8    , Expand);
175  setOperationAction(ISD::MUL             , MVT::i16   , Expand);
176  setOperationAction(ISD::MULHS           , MVT::i16   , Expand);
177  setOperationAction(ISD::MULHU           , MVT::i16   , Expand);
178  setOperationAction(ISD::SDIV            , MVT::i16   , Expand);
179  setOperationAction(ISD::UDIV            , MVT::i16   , Expand);
180  setOperationAction(ISD::SREM            , MVT::i16   , Expand);
181  setOperationAction(ISD::UREM            , MVT::i16   , Expand);
182  setOperationAction(ISD::MUL             , MVT::i32   , Expand);
183  setOperationAction(ISD::MULHS           , MVT::i32   , Expand);
184  setOperationAction(ISD::MULHU           , MVT::i32   , Expand);
185  setOperationAction(ISD::SDIV            , MVT::i32   , Expand);
186  setOperationAction(ISD::UDIV            , MVT::i32   , Expand);
187  setOperationAction(ISD::SREM            , MVT::i32   , Expand);
188  setOperationAction(ISD::UREM            , MVT::i32   , Expand);
189  setOperationAction(ISD::MUL             , MVT::i64   , Expand);
190  setOperationAction(ISD::MULHS           , MVT::i64   , Expand);
191  setOperationAction(ISD::MULHU           , MVT::i64   , Expand);
192  setOperationAction(ISD::SDIV            , MVT::i64   , Expand);
193  setOperationAction(ISD::UDIV            , MVT::i64   , Expand);
194  setOperationAction(ISD::SREM            , MVT::i64   , Expand);
195  setOperationAction(ISD::UREM            , MVT::i64   , Expand);
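  // This matches the hardware: DIV/IDIV produce the quotient in AL/AX/EAX and
  // the remainder in AH/DX/EDX in one instruction, so exposing the combined
  // divrem node lets a single instruction serve both 'x / y' and 'x % y'.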
196
197  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
198  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
199  setOperationAction(ISD::BR_CC            , MVT::Other, Expand);
200  setOperationAction(ISD::SELECT_CC        , MVT::Other, Expand);
201  setOperationAction(ISD::MEMMOVE          , MVT::Other, Expand);
202  if (Subtarget->is64Bit())
203    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
204  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
205  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
206  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
207  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
208  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
209  setOperationAction(ISD::FLT_ROUNDS       , MVT::i32  , Custom);
210
211  setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
212  setOperationAction(ISD::CTTZ             , MVT::i8   , Custom);
213  setOperationAction(ISD::CTLZ             , MVT::i8   , Custom);
214  setOperationAction(ISD::CTPOP            , MVT::i16  , Expand);
215  setOperationAction(ISD::CTTZ             , MVT::i16  , Custom);
216  setOperationAction(ISD::CTLZ             , MVT::i16  , Custom);
217  setOperationAction(ISD::CTPOP            , MVT::i32  , Expand);
218  setOperationAction(ISD::CTTZ             , MVT::i32  , Custom);
219  setOperationAction(ISD::CTLZ             , MVT::i32  , Custom);
220  if (Subtarget->is64Bit()) {
221    setOperationAction(ISD::CTPOP          , MVT::i64  , Expand);
222    setOperationAction(ISD::CTTZ           , MVT::i64  , Custom);
223    setOperationAction(ISD::CTLZ           , MVT::i64  , Custom);
224  }
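  // CTTZ/CTLZ are custom lowered because BSF/BSR leave the destination
  // undefined when the source is zero, so the lowering must account for a
  // zero input separately (e.g. with a conditional move).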
225
226  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
227  setOperationAction(ISD::BSWAP            , MVT::i16  , Expand);
228
229  // These should be promoted to a larger select which is supported.
230  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
231  setOperationAction(ISD::SELECT           , MVT::i8   , Promote);
232  // X86 wants to expand cmov itself.
233  setOperationAction(ISD::SELECT          , MVT::i16  , Custom);
234  setOperationAction(ISD::SELECT          , MVT::i32  , Custom);
235  setOperationAction(ISD::SELECT          , MVT::f32  , Custom);
236  setOperationAction(ISD::SELECT          , MVT::f64  , Custom);
237  setOperationAction(ISD::SELECT          , MVT::f80  , Custom);
238  setOperationAction(ISD::SETCC           , MVT::i8   , Custom);
239  setOperationAction(ISD::SETCC           , MVT::i16  , Custom);
240  setOperationAction(ISD::SETCC           , MVT::i32  , Custom);
241  setOperationAction(ISD::SETCC           , MVT::f32  , Custom);
242  setOperationAction(ISD::SETCC           , MVT::f64  , Custom);
243  setOperationAction(ISD::SETCC           , MVT::f80  , Custom);
244  if (Subtarget->is64Bit()) {
245    setOperationAction(ISD::SELECT        , MVT::i64  , Custom);
246    setOperationAction(ISD::SETCC         , MVT::i64  , Custom);
247  }
248  // The X86 ret instruction may pop the stack.
249  setOperationAction(ISD::RET             , MVT::Other, Custom);
250  if (!Subtarget->is64Bit())
251    setOperationAction(ISD::EH_RETURN       , MVT::Other, Custom);
252
253  // Darwin ABI issue.
254  setOperationAction(ISD::ConstantPool    , MVT::i32  , Custom);
255  setOperationAction(ISD::JumpTable       , MVT::i32  , Custom);
256  setOperationAction(ISD::GlobalAddress   , MVT::i32  , Custom);
257  setOperationAction(ISD::GlobalTLSAddress, MVT::i32  , Custom);
258  setOperationAction(ISD::ExternalSymbol  , MVT::i32  , Custom);
259  if (Subtarget->is64Bit()) {
260    setOperationAction(ISD::ConstantPool  , MVT::i64  , Custom);
261    setOperationAction(ISD::JumpTable     , MVT::i64  , Custom);
262    setOperationAction(ISD::GlobalAddress , MVT::i64  , Custom);
263    setOperationAction(ISD::ExternalSymbol, MVT::i64  , Custom);
264  }
265  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
266  setOperationAction(ISD::SHL_PARTS       , MVT::i32  , Custom);
267  setOperationAction(ISD::SRA_PARTS       , MVT::i32  , Custom);
268  setOperationAction(ISD::SRL_PARTS       , MVT::i32  , Custom);
269  // X86 wants to expand memset / memcpy itself.
270  setOperationAction(ISD::MEMSET          , MVT::Other, Custom);
271  setOperationAction(ISD::MEMCPY          , MVT::Other, Custom);
272
273  // Use the default ISD::LOCATION expansion.
274  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
275  // FIXME - use subtarget debug flags
276  if (!Subtarget->isTargetDarwin() &&
277      !Subtarget->isTargetELF() &&
278      !Subtarget->isTargetCygMing())
279    setOperationAction(ISD::LABEL, MVT::Other, Expand);
280
281  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
282  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
283  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
284  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
285  if (Subtarget->is64Bit()) {
286    // FIXME: Verify
287    setExceptionPointerRegister(X86::RAX);
288    setExceptionSelectorRegister(X86::RDX);
289  } else {
290    setExceptionPointerRegister(X86::EAX);
291    setExceptionSelectorRegister(X86::EDX);
292  }
293  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
294
295  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);
296
297  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
298  setOperationAction(ISD::VASTART           , MVT::Other, Custom);
299  setOperationAction(ISD::VAARG             , MVT::Other, Expand);
300  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
301  if (Subtarget->is64Bit())
302    setOperationAction(ISD::VACOPY          , MVT::Other, Custom);
303  else
304    setOperationAction(ISD::VACOPY          , MVT::Other, Expand);
305
306  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
307  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
308  if (Subtarget->is64Bit())
309    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
310  if (Subtarget->isTargetCygMing())
311    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
312  else
313    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
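  // On Cygwin/MinGW, large dynamic stack allocations must touch each page so
  // the guard page is grown correctly, hence the Custom lowering above, which
  // routes the request through a stack-probing allocation routine rather than
  // a plain ESP adjustment.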
314
315  if (X86ScalarSSEf64) {
316    // f32 and f64 use SSE.
317    // Set up the FP register classes.
318    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
319    addRegisterClass(MVT::f64, X86::FR64RegisterClass);
320
321    // Use ANDPD to simulate FABS.
322    setOperationAction(ISD::FABS , MVT::f64, Custom);
323    setOperationAction(ISD::FABS , MVT::f32, Custom);
324
325    // Use XORP to simulate FNEG.
326    setOperationAction(ISD::FNEG , MVT::f64, Custom);
327    setOperationAction(ISD::FNEG , MVT::f32, Custom);
328
329    // Use ANDPD and ORPD to simulate FCOPYSIGN.
330    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
331    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
332
333    // We don't support sin/cos/fmod
334    setOperationAction(ISD::FSIN , MVT::f64, Expand);
335    setOperationAction(ISD::FCOS , MVT::f64, Expand);
336    setOperationAction(ISD::FREM , MVT::f64, Expand);
337    setOperationAction(ISD::FSIN , MVT::f32, Expand);
338    setOperationAction(ISD::FCOS , MVT::f32, Expand);
339    setOperationAction(ISD::FREM , MVT::f32, Expand);
340
341    // Expand FP immediates into loads from the stack, except for the special
342    // cases we handle.
343    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
344    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
345    addLegalFPImmediate(APFloat(+0.0)); // xorpd
346    addLegalFPImmediate(APFloat(+0.0f)); // xorps
347
348    // Conversions to long double (in X87) go through memory.
349    setConvertAction(MVT::f32, MVT::f80, Expand);
350    setConvertAction(MVT::f64, MVT::f80, Expand);
351
352    // Conversions from long double (in X87) go through memory.
353    setConvertAction(MVT::f80, MVT::f32, Expand);
354    setConvertAction(MVT::f80, MVT::f64, Expand);
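    // There is no direct register-to-register move between XMM registers and
    // the x87 stack, so these conversions are forced through a memory
    // temporary: store from one unit, reload into the other.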
355  } else if (X86ScalarSSEf32) {
356    // Use SSE for f32, x87 for f64.
357    // Set up the FP register classes.
358    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
359    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
360
361    // Use ANDPS to simulate FABS.
362    setOperationAction(ISD::FABS , MVT::f32, Custom);
363
364    // Use XORP to simulate FNEG.
365    setOperationAction(ISD::FNEG , MVT::f32, Custom);
366
367    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
368
369    // Use ANDPS and ORPS to simulate FCOPYSIGN.
370    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
371    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
372
373    // We don't support sin/cos/fmod
374    setOperationAction(ISD::FSIN , MVT::f32, Expand);
375    setOperationAction(ISD::FCOS , MVT::f32, Expand);
376    setOperationAction(ISD::FREM , MVT::f32, Expand);
377
378    // Expand FP immediates into loads from the stack, except for the special
379    // cases we handle.
380    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
381    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
382    addLegalFPImmediate(APFloat(+0.0f)); // xorps
383    addLegalFPImmediate(APFloat(+0.0)); // FLD0
384    addLegalFPImmediate(APFloat(+1.0)); // FLD1
385    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
386    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
387
388    // SSE->x87 conversions go through memory.
389    setConvertAction(MVT::f32, MVT::f64, Expand);
390    setConvertAction(MVT::f32, MVT::f80, Expand);
391
392    // x87->SSE truncations need to go through memory.
393    setConvertAction(MVT::f80, MVT::f32, Expand);
394    setConvertAction(MVT::f64, MVT::f32, Expand);
395    // And x87->x87 truncations also.
396    setConvertAction(MVT::f80, MVT::f64, Expand);
397
398    if (!UnsafeFPMath) {
399      setOperationAction(ISD::FSIN           , MVT::f64  , Expand);
400      setOperationAction(ISD::FCOS           , MVT::f64  , Expand);
401    }
402  } else {
403    // f32 and f64 in x87.
404    // Set up the FP register classes.
405    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
406    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);
407
408    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
409    setOperationAction(ISD::UNDEF,     MVT::f32, Expand);
410    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
411    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
412
413    // Floating truncations need to go through memory.
414    setConvertAction(MVT::f80, MVT::f32, Expand);
415    setConvertAction(MVT::f64, MVT::f32, Expand);
416    setConvertAction(MVT::f80, MVT::f64, Expand);
417
418    if (!UnsafeFPMath) {
419      setOperationAction(ISD::FSIN           , MVT::f64  , Expand);
420      setOperationAction(ISD::FCOS           , MVT::f64  , Expand);
421    }
422
423    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
424    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
425    addLegalFPImmediate(APFloat(+0.0)); // FLD0
426    addLegalFPImmediate(APFloat(+1.0)); // FLD1
427    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
428    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
429    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
430    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
431    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
432    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
433  }
434
435  // Long double always uses X87.
436  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
437  setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
438  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
439  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
440  if (!UnsafeFPMath) {
441    setOperationAction(ISD::FSIN           , MVT::f80  , Expand);
442    setOperationAction(ISD::FCOS           , MVT::f80  , Expand);
443  }
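  // f80 is the 80-bit x87 extended-precision format; SSE has no matching
  // register width, so long double values stay on the x87 stack even when
  // f32/f64 arithmetic uses SSE.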
444
445  // Always use a library call for pow.
446  setOperationAction(ISD::FPOW             , MVT::f32  , Expand);
447  setOperationAction(ISD::FPOW             , MVT::f64  , Expand);
448  setOperationAction(ISD::FPOW             , MVT::f80  , Expand);
449
450  // First set operation action for all vector types to expand. Then we
451  // will selectively turn on ones that can be effectively codegen'd.
452  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
453       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
454    setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
455    setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
456    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
457    setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
458    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
459    setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
460    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
461    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
462    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
463    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
464    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
465    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
466    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
467    setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Expand);
468    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
469    setOperationAction(ISD::INSERT_VECTOR_ELT,  (MVT::ValueType)VT, Expand);
470    setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
471    setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
472    setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
473    setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
474    setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
475    setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
476    setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
477    setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
478    setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
479    setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
480    setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
481    setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
482    setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
483    setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
484    setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
485    setOperationAction(ISD::SHL, (MVT::ValueType)VT, Expand);
486    setOperationAction(ISD::SRA, (MVT::ValueType)VT, Expand);
487    setOperationAction(ISD::SRL, (MVT::ValueType)VT, Expand);
488    setOperationAction(ISD::ROTL, (MVT::ValueType)VT, Expand);
489    setOperationAction(ISD::ROTR, (MVT::ValueType)VT, Expand);
490    setOperationAction(ISD::BSWAP, (MVT::ValueType)VT, Expand);
491  }
492
493  if (Subtarget->hasMMX()) {
494    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
495    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
496    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
497    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);
498
499    // FIXME: add MMX packed arithmetic
500
501    setOperationAction(ISD::ADD,                MVT::v8i8,  Legal);
502    setOperationAction(ISD::ADD,                MVT::v4i16, Legal);
503    setOperationAction(ISD::ADD,                MVT::v2i32, Legal);
504    setOperationAction(ISD::ADD,                MVT::v1i64, Legal);
505
506    setOperationAction(ISD::SUB,                MVT::v8i8,  Legal);
507    setOperationAction(ISD::SUB,                MVT::v4i16, Legal);
508    setOperationAction(ISD::SUB,                MVT::v2i32, Legal);
509    setOperationAction(ISD::SUB,                MVT::v1i64, Legal);
510
511    setOperationAction(ISD::MULHS,              MVT::v4i16, Legal);
512    setOperationAction(ISD::MUL,                MVT::v4i16, Legal);
513
514    setOperationAction(ISD::AND,                MVT::v8i8,  Promote);
515    AddPromotedToType (ISD::AND,                MVT::v8i8,  MVT::v1i64);
516    setOperationAction(ISD::AND,                MVT::v4i16, Promote);
517    AddPromotedToType (ISD::AND,                MVT::v4i16, MVT::v1i64);
518    setOperationAction(ISD::AND,                MVT::v2i32, Promote);
519    AddPromotedToType (ISD::AND,                MVT::v2i32, MVT::v1i64);
520    setOperationAction(ISD::AND,                MVT::v1i64, Legal);
521
522    setOperationAction(ISD::OR,                 MVT::v8i8,  Promote);
523    AddPromotedToType (ISD::OR,                 MVT::v8i8,  MVT::v1i64);
524    setOperationAction(ISD::OR,                 MVT::v4i16, Promote);
525    AddPromotedToType (ISD::OR,                 MVT::v4i16, MVT::v1i64);
526    setOperationAction(ISD::OR,                 MVT::v2i32, Promote);
527    AddPromotedToType (ISD::OR,                 MVT::v2i32, MVT::v1i64);
528    setOperationAction(ISD::OR,                 MVT::v1i64, Legal);
529
530    setOperationAction(ISD::XOR,                MVT::v8i8,  Promote);
531    AddPromotedToType (ISD::XOR,                MVT::v8i8,  MVT::v1i64);
532    setOperationAction(ISD::XOR,                MVT::v4i16, Promote);
533    AddPromotedToType (ISD::XOR,                MVT::v4i16, MVT::v1i64);
534    setOperationAction(ISD::XOR,                MVT::v2i32, Promote);
535    AddPromotedToType (ISD::XOR,                MVT::v2i32, MVT::v1i64);
536    setOperationAction(ISD::XOR,                MVT::v1i64, Legal);
537
538    setOperationAction(ISD::LOAD,               MVT::v8i8,  Promote);
539    AddPromotedToType (ISD::LOAD,               MVT::v8i8,  MVT::v1i64);
540    setOperationAction(ISD::LOAD,               MVT::v4i16, Promote);
541    AddPromotedToType (ISD::LOAD,               MVT::v4i16, MVT::v1i64);
542    setOperationAction(ISD::LOAD,               MVT::v2i32, Promote);
543    AddPromotedToType (ISD::LOAD,               MVT::v2i32, MVT::v1i64);
544    setOperationAction(ISD::LOAD,               MVT::v1i64, Legal);
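    // The Promote/AddPromotedToType pairs above bitcast the narrower MMX
    // vector types to v1i64, so one set of 64-bit patterns (PAND, POR, PXOR,
    // MOVQ) covers bitwise operations and loads for every MMX element width.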
545
546    setOperationAction(ISD::BUILD_VECTOR,       MVT::v8i8,  Custom);
547    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4i16, Custom);
548    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i32, Custom);
549    setOperationAction(ISD::BUILD_VECTOR,       MVT::v1i64, Custom);
550
551    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v8i8,  Custom);
552    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4i16, Custom);
553    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i32, Custom);
554    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v1i64, Custom);
555
556    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8i8,  Custom);
557    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v4i16, Custom);
558    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v2i32, Custom);
559    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v1i64, Custom);
560  }
561
562  if (Subtarget->hasSSE1()) {
563    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);
564
565    setOperationAction(ISD::FADD,               MVT::v4f32, Legal);
566    setOperationAction(ISD::FSUB,               MVT::v4f32, Legal);
567    setOperationAction(ISD::FMUL,               MVT::v4f32, Legal);
568    setOperationAction(ISD::FDIV,               MVT::v4f32, Legal);
569    setOperationAction(ISD::FSQRT,              MVT::v4f32, Legal);
570    setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
571    setOperationAction(ISD::LOAD,               MVT::v4f32, Legal);
572    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
573    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
574    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
575    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
576  }
577
578  if (Subtarget->hasSSE2()) {
579    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
580    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
581    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
582    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
583    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);
584
585    setOperationAction(ISD::ADD,                MVT::v16i8, Legal);
586    setOperationAction(ISD::ADD,                MVT::v8i16, Legal);
587    setOperationAction(ISD::ADD,                MVT::v4i32, Legal);
588    setOperationAction(ISD::ADD,                MVT::v2i64, Legal);
589    setOperationAction(ISD::SUB,                MVT::v16i8, Legal);
590    setOperationAction(ISD::SUB,                MVT::v8i16, Legal);
591    setOperationAction(ISD::SUB,                MVT::v4i32, Legal);
592    setOperationAction(ISD::SUB,                MVT::v2i64, Legal);
593    setOperationAction(ISD::MUL,                MVT::v8i16, Legal);
594    setOperationAction(ISD::FADD,               MVT::v2f64, Legal);
595    setOperationAction(ISD::FSUB,               MVT::v2f64, Legal);
596    setOperationAction(ISD::FMUL,               MVT::v2f64, Legal);
597    setOperationAction(ISD::FDIV,               MVT::v2f64, Legal);
598    setOperationAction(ISD::FSQRT,              MVT::v2f64, Legal);
599    setOperationAction(ISD::FNEG,               MVT::v2f64, Custom);
600
601    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v16i8, Custom);
602    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8i16, Custom);
603    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
604    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
605    // Implement v4f32 insert_vector_elt in terms of SSE2 v8i16 ones.
606    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);
607
608    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
609    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
610      // Do not attempt to custom lower non-power-of-2 vectors
611      if (!isPowerOf2_32(MVT::getVectorNumElements(VT)))
612        continue;
613      setOperationAction(ISD::BUILD_VECTOR,        (MVT::ValueType)VT, Custom);
614      setOperationAction(ISD::VECTOR_SHUFFLE,      (MVT::ValueType)VT, Custom);
615      setOperationAction(ISD::EXTRACT_VECTOR_ELT,  (MVT::ValueType)VT, Custom);
616    }
617    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
618    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
619    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
620    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
621    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
622    if (Subtarget->is64Bit())
623      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
624
625    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
626    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
627      setOperationAction(ISD::AND,    (MVT::ValueType)VT, Promote);
628      AddPromotedToType (ISD::AND,    (MVT::ValueType)VT, MVT::v2i64);
629      setOperationAction(ISD::OR,     (MVT::ValueType)VT, Promote);
630      AddPromotedToType (ISD::OR,     (MVT::ValueType)VT, MVT::v2i64);
631      setOperationAction(ISD::XOR,    (MVT::ValueType)VT, Promote);
632      AddPromotedToType (ISD::XOR,    (MVT::ValueType)VT, MVT::v2i64);
633      setOperationAction(ISD::LOAD,   (MVT::ValueType)VT, Promote);
634      AddPromotedToType (ISD::LOAD,   (MVT::ValueType)VT, MVT::v2i64);
635      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
636      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
637    }
638
639    // Custom lower v2i64 and v2f64 selects.
640    setOperationAction(ISD::LOAD,               MVT::v2f64, Legal);
641    setOperationAction(ISD::LOAD,               MVT::v2i64, Legal);
642    setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
643    setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);
644  }
645
646  // We want to custom lower some of our intrinsics.
647  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
648
649  // We have target-specific dag combine patterns for the following nodes:
650  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
651  setTargetDAGCombine(ISD::SELECT);
652
653  computeRegisterProperties();
654
655  // FIXME: These should be based on subtarget info. Plus, the values should
656  // be smaller when we are optimizing for size.
657  maxStoresPerMemset = 16; // For %llvm.memset -> sequence of stores
658  maxStoresPerMemcpy = 16; // For %llvm.memcpy -> sequence of stores
659  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
660  allowUnalignedMemoryAccesses = true; // x86 supports it!
661}
662
663
664/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
665/// jumptable.
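/// This is the global offset table when one is in use, the PIC base register
/// for other 32-bit PIC styles, and the jump table address itself for
/// RIP-relative (x86-64) code.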
666SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
667                                                      SelectionDAG &DAG) const {
668  if (usesGlobalOffsetTable())
669    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
670  if (!Subtarget->isPICStyleRIPRel())
671    return DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy());
672  return Table;
673}
674
675//===----------------------------------------------------------------------===//
676//               Return Value Calling Convention Implementation
677//===----------------------------------------------------------------------===//
678
679#include "X86GenCallingConv.inc"
680
681/// GetPossiblePreceedingTailCall - Get the preceding X86ISD::TAILCALL node if it
682/// exists, skipping a possible ISD::TokenFactor.
683static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain) {
684  if (Chain.getOpcode()==X86ISD::TAILCALL) {
685    return Chain;
686  } else if (Chain.getOpcode()==ISD::TokenFactor) {
687    if (Chain.getNumOperands() &&
688        Chain.getOperand(0).getOpcode()==X86ISD::TAILCALL)
689      return Chain.getOperand(0);
690  }
691  return Chain;
692}
693
694/// LowerRET - Lower an ISD::RET node.
695SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
696  assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");
697
698  SmallVector<CCValAssign, 16> RVLocs;
699  unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
700  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
701  CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
702  CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);
703
704  // If this is the first return lowered for this function, add the regs to the
705  // liveout set for the function.
706  if (DAG.getMachineFunction().liveout_empty()) {
707    for (unsigned i = 0; i != RVLocs.size(); ++i)
708      if (RVLocs[i].isRegLoc())
709        DAG.getMachineFunction().addLiveOut(RVLocs[i].getLocReg());
710  }
711  SDOperand Chain = Op.getOperand(0);
712
713  // Handle tail call return.
714  Chain = GetPossiblePreceedingTailCall(Chain);
715  if (Chain.getOpcode() == X86ISD::TAILCALL) {
716    SDOperand TailCall = Chain;
717    SDOperand TargetAddress = TailCall.getOperand(1);
718    SDOperand StackAdjustment = TailCall.getOperand(2);
719    assert ( ((TargetAddress.getOpcode() == ISD::Register &&
720               (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
721                cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
722              TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
723              TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
724             "Expecting a global address, external symbol, or register");
725    assert( StackAdjustment.getOpcode() == ISD::Constant &&
726            "Expecting a const value");
727
728    SmallVector<SDOperand,8> Operands;
729    Operands.push_back(Chain.getOperand(0));
730    Operands.push_back(TargetAddress);
731    Operands.push_back(StackAdjustment);
732    // Copy registers used by the call. Last operand is a flag so it is not
733    // copied.
734    for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) {
735      Operands.push_back(Chain.getOperand(i));
736    }
737    return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
738                       Operands.size());
739  }
740
741  // Regular return.
742  SDOperand Flag;
743
744  // Copy the result values into the output registers.
745  if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() ||
746      RVLocs[0].getLocReg() != X86::ST0) {
747    for (unsigned i = 0; i != RVLocs.size(); ++i) {
748      CCValAssign &VA = RVLocs[i];
749      assert(VA.isRegLoc() && "Can only return in registers!");
750      Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1),
751                               Flag);
752      Flag = Chain.getValue(1);
753    }
754  } else {
755    // We need to handle a destination of ST0 specially, because it isn't really
756    // a register.
757    SDOperand Value = Op.getOperand(1);
758
759    // If this is an FP return with ScalarSSE, we need to move the value from
760    // an XMM register onto the fp-stack.
761    if ((X86ScalarSSEf32 && RVLocs[0].getValVT()==MVT::f32) ||
762        (X86ScalarSSEf64 && RVLocs[0].getValVT()==MVT::f64)) {
763      SDOperand MemLoc;
764
765      // If the value being returned is itself a scalar-SSE load, don't store it
766      // back to the stack only to reload it: just reuse the load's address.
767      if (ISD::isNON_EXTLoad(Value.Val) &&
768          (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
769        Chain  = Value.getOperand(0);
770        MemLoc = Value.getOperand(1);
771      } else {
772        // Spill the value to memory and reload it into top of stack.
773        unsigned Size = MVT::getSizeInBits(RVLocs[0].getValVT())/8;
774        MachineFunction &MF = DAG.getMachineFunction();
775        int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
776        MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
777        Chain = DAG.getStore(Op.getOperand(0), Value, MemLoc, NULL, 0);
778      }
779      SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other);
780      SDOperand Ops[] = {Chain, MemLoc, DAG.getValueType(RVLocs[0].getValVT())};
781      Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3);
782      Chain = Value.getValue(1);
783    }
784
785    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
786    SDOperand Ops[] = { Chain, Value };
787    Chain = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops, 2);
788    Flag = Chain.getValue(1);
789  }
790
791  SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
792  if (Flag.Val)
793    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
794  else
795    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
796}
797
798
799/// LowerCallResult - Lower the result values of an ISD::CALL into the
800/// appropriate copies out of the physical registers.  This assumes that
801/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
802/// being lowered.  This returns an SDNode with the same number of values as the
803/// ISD::CALL.
804SDNode *X86TargetLowering::
805LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
806                unsigned CallingConv, SelectionDAG &DAG) {
807
808  // Assign locations to each value returned by this call.
809  SmallVector<CCValAssign, 16> RVLocs;
810  bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
811  CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
812  CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);
813
814  SmallVector<SDOperand, 8> ResultVals;
815
816  // Copy all of the result registers out of their specified physreg.
817  if (RVLocs.size() != 1 || RVLocs[0].getLocReg() != X86::ST0) {
818    for (unsigned i = 0; i != RVLocs.size(); ++i) {
819      Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
820                                 RVLocs[i].getValVT(), InFlag).getValue(1);
821      InFlag = Chain.getValue(2);
822      ResultVals.push_back(Chain.getValue(0));
823    }
824  } else {
825    // Copies from the FP stack are special, as ST0 isn't a valid register
826    // before the fp stackifier runs.
827
828    // Copy ST0 into an RFP register with FP_GET_RESULT.
829    SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other, MVT::Flag);
830    SDOperand GROps[] = { Chain, InFlag };
831    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, GROps, 2);
832    Chain  = RetVal.getValue(1);
833    InFlag = RetVal.getValue(2);
834
835    // If we are using ScalarSSE, store ST(0) to the stack and reload it into
836    // an XMM register.
837    if ((X86ScalarSSEf32 && RVLocs[0].getValVT() == MVT::f32) ||
838        (X86ScalarSSEf64 && RVLocs[0].getValVT() == MVT::f64)) {
839      SDOperand StoreLoc;
840      const Value *SrcVal = 0;
841      int SrcValOffset = 0;
842
843      // Determine where to store the value.  If the call result is directly
844      // used by a store, see if we can store directly into the location.  In
845      // this case, we'll end up producing a fst + movss[load] + movss[store] to
846      // the same location, and the two movss's will be nuked as dead.  This
847      // optimizes common things like "*D = atof(..)" to not need an
848      // intermediate stack slot.
849      if (SDOperand(TheCall, 0).hasOneUse() &&
850          SDOperand(TheCall, 1).hasOneUse()) {
851        // Ok, we have one use of the value and one use of the chain.  See if
852        // they are the same node: a store.
853        if (StoreSDNode *N = dyn_cast<StoreSDNode>(*TheCall->use_begin())) {
854          if (N->getChain().Val == TheCall && N->getValue().Val == TheCall &&
855              !N->isVolatile() && !N->isTruncatingStore() &&
856              N->getAddressingMode() == ISD::UNINDEXED) {
857            StoreLoc = N->getBasePtr();
858            SrcVal = N->getSrcValue();
859            SrcValOffset = N->getSrcValueOffset();
860          }
861        }
862      }
863
864      // If we weren't able to optimize the result, just create a temporary
865      // stack slot.
866      if (StoreLoc.Val == 0) {
867        MachineFunction &MF = DAG.getMachineFunction();
868        int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
869        StoreLoc = DAG.getFrameIndex(SSFI, getPointerTy());
870      }
871
872      // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
873      // shouldn't be necessary except that RFP cannot be live across
874      // multiple blocks (which could happen if a select gets lowered into
875      // multiple blocks and scheduled in between them). When stackifier is
876      // fixed, they can be uncoupled.
877      SDOperand Ops[] = {
878        Chain, RetVal, StoreLoc, DAG.getValueType(RVLocs[0].getValVT()), InFlag
879      };
880      Chain = DAG.getNode(X86ISD::FST, MVT::Other, Ops, 5);
881      RetVal = DAG.getLoad(RVLocs[0].getValVT(), Chain,
882                           StoreLoc, SrcVal, SrcValOffset);
883      Chain = RetVal.getValue(1);
884    }
885    ResultVals.push_back(RetVal);
886  }
887
888  // Merge everything together with a MERGE_VALUES node.
889  ResultVals.push_back(Chain);
890  return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
891                     &ResultVals[0], ResultVals.size()).Val;
892}
893
894
895//===----------------------------------------------------------------------===//
896//                C & StdCall & Fast Calling Convention implementation
897//===----------------------------------------------------------------------===//
898//  The StdCall calling convention is the standard convention for many Windows
899//  API routines. It differs from the C calling convention only slightly: the
900//  callee, not the caller, cleans up the stack, and symbols are decorated
901//  (e.g. _name@bytes). It does not support vector arguments. For the fast
902//  calling convention (tail call) implementation, see
903//  LowerX86_32FastCCCallTo.
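//  As an illustrative (hypothetical) example, a function declared in C as
//    int __stdcall Sum(int a, int b);
//  is emitted with the decorated symbol _Sum@8 on 32-bit Windows, and its
//  'ret 8' pops the two argument words on return.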
904
905/// AddLiveIn - This helper function adds the specified physical register to the
906/// MachineFunction as a live in value.  It also creates a corresponding virtual
907/// register for it.
908static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
909                          const TargetRegisterClass *RC) {
910  assert(RC->contains(PReg) && "Not the correct regclass!");
911  unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
912  MF.addLiveIn(PReg, VReg);
913  return VReg;
914}
915
916// align stack arguments according to platform alignment needed for tail calls
917unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG& DAG);
918
919SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
920                                              const CCValAssign &VA,
921                                              MachineFrameInfo *MFI,
922                                              SDOperand Root, unsigned i) {
923  // Create the nodes corresponding to a load from this parameter slot.
924  int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
925                                  VA.getLocMemOffset());
926  SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
927
928  unsigned Flags =  cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
929
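  // For a byval aggregate the caller has already placed a copy of the object
  // in the argument area, so the address of that stack slot (the frame index)
  // is the argument value; other arguments are loaded from their slots.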
930  if (Flags & ISD::ParamFlags::ByVal)
931    return FIN;
932  else
933    return DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0);
934}
935
936SDOperand X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG,
937                                               bool isStdCall) {
938  unsigned NumArgs = Op.Val->getNumValues() - 1;
939  MachineFunction &MF = DAG.getMachineFunction();
940  MachineFrameInfo *MFI = MF.getFrameInfo();
941  SDOperand Root = Op.getOperand(0);
942  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
943  unsigned CC = MF.getFunction()->getCallingConv();
944  // Assign locations to all of the incoming arguments.
945  SmallVector<CCValAssign, 16> ArgLocs;
946  CCState CCInfo(CC, isVarArg,
947                 getTargetMachine(), ArgLocs);
948  // Check for possible tail call calling convention.
949  if (CC == CallingConv::Fast && PerformTailCallOpt)
950    CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_TailCall);
951  else
952    CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_C);
953
954  SmallVector<SDOperand, 8> ArgValues;
955  unsigned LastVal = ~0U;
956  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
957    CCValAssign &VA = ArgLocs[i];
958    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
959    // places.
960    assert(VA.getValNo() != LastVal &&
961           "Don't support value assigned to multiple locs yet");
962    LastVal = VA.getValNo();
963
964    if (VA.isRegLoc()) {
965      MVT::ValueType RegVT = VA.getLocVT();
966      TargetRegisterClass *RC;
967      if (RegVT == MVT::i32)
968        RC = X86::GR32RegisterClass;
969      else {
970        assert(MVT::isVector(RegVT));
971        RC = X86::VR128RegisterClass;
972      }
973
974      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
975      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);
976
977      // If this is an 8 or 16-bit value, it is really passed promoted to 32
978      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
979      // right size.
980      if (VA.getLocInfo() == CCValAssign::SExt)
981        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
982                               DAG.getValueType(VA.getValVT()));
983      else if (VA.getLocInfo() == CCValAssign::ZExt)
984        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
985                               DAG.getValueType(VA.getValVT()));
986
987      if (VA.getLocInfo() != CCValAssign::Full)
988        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);
989
990      ArgValues.push_back(ArgValue);
991    } else {
992      assert(VA.isMemLoc());
993      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
994    }
995  }
996
997  unsigned StackSize = CCInfo.getNextStackOffset();
998  // align stack specially for tail calls
999  if (CC==CallingConv::Fast)
1000    StackSize = GetAlignedArgumentStackSize(StackSize,DAG);
1001
1002  ArgValues.push_back(Root);
1003
1004  // If the function takes a variable number of arguments, make a frame index for
1005  // the start of the first vararg value... for expansion of llvm.va_start.
1006  if (isVarArg)
1007    VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
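  // VarArgsFrameIndex marks the first byte past the fixed arguments; the
  // custom VASTART lowering stores this address into the va_list so va_arg
  // can walk the remaining stack arguments.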
1008
1009  // Tail call calling convention (CallingConv::Fast) does not support varargs.
1010  assert( !(isVarArg && CC == CallingConv::Fast) &&
1011         "CallingConv::Fast does not support varargs.");
1012
1013  if (isStdCall && !isVarArg &&
1014      (CC==CallingConv::Fast && PerformTailCallOpt || CC!=CallingConv::Fast)) {
1015    BytesToPopOnReturn  = StackSize;    // Callee pops everything.
1016    BytesCallerReserves = 0;
1017  } else {
1018    BytesToPopOnReturn  = 0; // Callee pops nothing.
1019
1020    // If this is an sret function, the return should pop the hidden pointer.
1021    if (NumArgs &&
1022        (cast<ConstantSDNode>(Op.getOperand(3))->getValue() &
1023         ISD::ParamFlags::StructReturn))
1024      BytesToPopOnReturn = 4;
1025
1026    BytesCallerReserves = StackSize;
1027  }
1028
1029  RegSaveFrameIndex = 0xAAAAAAA;  // X86-64 only.
1030
1031  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
1032  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);
1033
1034  // Return the new list of results.
1035  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
1036                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
1037}
1038
1039SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG,
1040                                            unsigned CC) {
1041  SDOperand Chain     = Op.getOperand(0);
1042  bool isVarArg       = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
1043  SDOperand Callee    = Op.getOperand(4);
1044  unsigned NumOps     = (Op.getNumOperands() - 5) / 2;
1045
1046  // Analyze operands of the call, assigning locations to each operand.
1047  SmallVector<CCValAssign, 16> ArgLocs;
1048  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
1049  if(CC==CallingConv::Fast && PerformTailCallOpt)
1050    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_TailCall);
1051  else
1052    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_C);
1053
1054  // Get a count of how many bytes are to be pushed on the stack.
1055  unsigned NumBytes = CCInfo.getNextStackOffset();
1056  if (CC==CallingConv::Fast)
1057    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
1058
1059  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
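  // CALLSEQ_START and the matching CALLSEQ_END below bracket the outgoing
  // argument area; they become the stack pointer adjustments that reserve and
  // release NumBytes around the call.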
1060
1061  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
1062  SmallVector<SDOperand, 8> MemOpChains;
1063
1064  SDOperand StackPtr;
1065
1066  // Walk the register/memloc assignments, inserting copies/loads.
1067  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1068    CCValAssign &VA = ArgLocs[i];
1069    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
1070
1071    // Promote the value if needed.
1072    switch (VA.getLocInfo()) {
1073    default: assert(0 && "Unknown loc info!");
1074    case CCValAssign::Full: break;
1075    case CCValAssign::SExt:
1076      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
1077      break;
1078    case CCValAssign::ZExt:
1079      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
1080      break;
1081    case CCValAssign::AExt:
1082      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
1083      break;
1084    }
1085
1086    if (VA.isRegLoc()) {
1087      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1088    } else {
1089      assert(VA.isMemLoc());
1090      if (StackPtr.Val == 0)
1091        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
1092
1093      MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
1094                                             Arg));
1095    }
1096  }
1097
1098  // If the first argument is an sret pointer, remember it.
1099  bool isSRet = NumOps &&
1100    (cast<ConstantSDNode>(Op.getOperand(6))->getValue() &
1101     ISD::ParamFlags::StructReturn);
1102
1103  if (!MemOpChains.empty())
1104    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
1105                        &MemOpChains[0], MemOpChains.size());
1106
1107  // Build a sequence of copy-to-reg nodes chained together with token chain
1108  // and flag operands which copy the outgoing args into registers.
1109  SDOperand InFlag;
1110  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1111    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
1112                             InFlag);
1113    InFlag = Chain.getValue(1);
1114  }
1115
1116  // ELF / PIC requires the GOT pointer to be in the EBX register before
1117  // function calls made via the PLT.
1118  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1119      Subtarget->isPICStyleGOT()) {
1120    Chain = DAG.getCopyToReg(Chain, X86::EBX,
1121                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
1122                             InFlag);
1123    InFlag = Chain.getValue(1);
1124  }
1125
1126  // If the callee is a GlobalAddress node (quite common, every direct call is),
1127  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1128  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1129    // We should use an extra load for direct calls to dllimported functions in
1130    // non-JIT mode.
1131    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
1132                                        getTargetMachine(), true))
1133      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
1134  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
1135    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
1136
1137  // Returns a chain & a flag for retval copy to use.
1138  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
1139  SmallVector<SDOperand, 8> Ops;
1140  Ops.push_back(Chain);
1141  Ops.push_back(Callee);
1142
1143  // Add argument registers to the end of the list so that they are known live
1144  // into the call.
1145  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1146    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1147                                  RegsToPass[i].second.getValueType()));
1148
1149  // Add an implicit use of the GOT pointer in EBX.
1150  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1151      Subtarget->isPICStyleGOT())
1152    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
1153
1154  if (InFlag.Val)
1155    Ops.push_back(InFlag);
1156
1157  Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size());
1158  InFlag = Chain.getValue(1);
1159
1160  // Create the CALLSEQ_END node.
1161  unsigned NumBytesForCalleeToPush = 0;
1162
1163  if (CC == CallingConv::X86_StdCall ||
1164      (CC == CallingConv::Fast && PerformTailCallOpt)) {
1165    if (isVarArg)
1166      NumBytesForCalleeToPush = isSRet ? 4 : 0;
1167    else
1168      NumBytesForCalleeToPush = NumBytes;
1169    assert(!(isVarArg && CC==CallingConv::Fast) &&
1170            "CallingConv::Fast does not support varargs.");
1171  } else {
1172    // If this is a call to a struct-return function, the callee
1173    // pops the hidden struct pointer, so we have to push it back.
1174    // This is common for Darwin/X86, Linux & Mingw32 targets.
1175    NumBytesForCalleeToPush = isSRet ? 4 : 0;
1176  }
1177
1178  Chain = DAG.getCALLSEQ_END(Chain,
1179                             DAG.getConstant(NumBytes, getPointerTy()),
1180                             DAG.getConstant(NumBytesForCalleeToPush,
1181                                             getPointerTy()),
1182                             InFlag);
1183  InFlag = Chain.getValue(1);
1184
1185  // Handle result values, copying them out of physregs into vregs that we
1186  // return.
1187  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
1188}
1189
1190
1191//===----------------------------------------------------------------------===//
1192//                   FastCall Calling Convention implementation
1193//===----------------------------------------------------------------------===//
1194//
1195// The X86 'fastcall' calling convention passes up to two integer arguments in
1196// registers (an appropriate portion of ECX/EDX), passes arguments in C order,
1197// requires that the callee pop its arguments off the stack (allowing proper
1198// tail calls), and has the same return value conventions as the C calling convention.
1199//
1200// This calling convention always arranges for the callee pop value to be 8n+4
1201// bytes, which is needed for tail recursion elimination and stack alignment
1202// reasons.
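//
// For example (illustrative): 8 bytes of stack arguments are padded to 12 by
// the 8n+4 adjustment below (on targets where it applies), so the callee-pop
// amount stays congruent to 4 modulo 8.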
1203SDOperand
1204X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) {
1205  MachineFunction &MF = DAG.getMachineFunction();
1206  MachineFrameInfo *MFI = MF.getFrameInfo();
1207  SDOperand Root = Op.getOperand(0);
1208  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
1209
1210  // Assign locations to all of the incoming arguments.
1211  SmallVector<CCValAssign, 16> ArgLocs;
1212  CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
1213                 getTargetMachine(), ArgLocs);
1214  CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_FastCall);
1215
1216  SmallVector<SDOperand, 8> ArgValues;
1217  unsigned LastVal = ~0U;
1218  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1219    CCValAssign &VA = ArgLocs[i];
1220    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
1221    // places.
1222    assert(VA.getValNo() != LastVal &&
1223           "Don't support value assigned to multiple locs yet");
1224    LastVal = VA.getValNo();
1225
1226    if (VA.isRegLoc()) {
1227      MVT::ValueType RegVT = VA.getLocVT();
1228      TargetRegisterClass *RC;
1229      if (RegVT == MVT::i32)
1230        RC = X86::GR32RegisterClass;
1231      else {
1232        assert(MVT::isVector(RegVT));
1233        RC = X86::VR128RegisterClass;
1234      }
1235
1236      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
1237      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);
1238
1239      // If this is an 8 or 16-bit value, it is really passed promoted to 32
1240      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
1241      // right size.
1242      if (VA.getLocInfo() == CCValAssign::SExt)
1243        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
1244                               DAG.getValueType(VA.getValVT()));
1245      else if (VA.getLocInfo() == CCValAssign::ZExt)
1246        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
1247                               DAG.getValueType(VA.getValVT()));
1248
1249      if (VA.getLocInfo() != CCValAssign::Full)
1250        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);
1251
1252      ArgValues.push_back(ArgValue);
1253    } else {
1254      assert(VA.isMemLoc());
1255      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
1256    }
1257  }
1258
1259  ArgValues.push_back(Root);
1260
1261  unsigned StackSize = CCInfo.getNextStackOffset();
1262
1263  if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
1264    // Make sure the stack size is 8n+4 bytes so that the start of the
1265    // arguments, and the arguments after the retaddr has been pushed, are
1266    // aligned.
1267    if ((StackSize & 7) == 0)
1268      StackSize += 4;
1269  }
1270
1271  VarArgsFrameIndex = 0xAAAAAAA;   // fastcc functions can't have varargs.
1272  RegSaveFrameIndex = 0xAAAAAAA;   // X86-64 only.
1273  BytesToPopOnReturn = StackSize;  // Callee pops all stack arguments.
1274  BytesCallerReserves = 0;
1275
1276  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
1277  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);
1278
1279  // Return the new list of results.
1280  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
1281                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
1282}
1283
1284SDOperand
1285X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
1286                                    const SDOperand &StackPtr,
1287                                    const CCValAssign &VA,
1288                                    SDOperand Chain,
1289                                    SDOperand Arg) {
1290  SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
1291  PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
1292  SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
1293  unsigned Flags    = cast<ConstantSDNode>(FlagsOp)->getValue();
1294  if (Flags & ISD::ParamFlags::ByVal) {
1295    unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
1296                           ISD::ParamFlags::ByValAlignOffs);
1297
1298    unsigned  Size = (Flags & ISD::ParamFlags::ByValSize) >>
1299        ISD::ParamFlags::ByValSizeOffs;
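    // Note (illustrative): the byval alignment is encoded as log2 in the packed
    // flags, so a stored field value of 3 decodes to Align == 8; the size field
    // is stored directly in bytes.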
1300
1301    SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
1302    SDOperand  SizeNode = DAG.getConstant(Size, MVT::i32);
1303    SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32);
1304
1305    return DAG.getMemcpy(Chain, PtrOff, Arg, SizeNode, AlignNode,
1306                         AlwaysInline);
1307  } else {
1308    return DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
1309  }
1310}
1311
1312SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG,
1313                                               unsigned CC) {
1314  SDOperand Chain     = Op.getOperand(0);
1315  bool isTailCall     = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
1316  bool isVarArg       = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
1317  SDOperand Callee    = Op.getOperand(4);
1318
1319  // Analyze operands of the call, assigning locations to each operand.
1320  SmallVector<CCValAssign, 16> ArgLocs;
1321  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
1322  CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_FastCall);
1323
1324  // Get a count of how many bytes are to be pushed on the stack.
1325  unsigned NumBytes = CCInfo.getNextStackOffset();
1326
1327  if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
1328    // Make sure the stack size is 8n+4 bytes so that the start of the
1329    // arguments, and the arguments after the retaddr has been pushed, are
1330    // aligned.
1331    if ((NumBytes & 7) == 0)
1332      NumBytes += 4;
1333  }
1334
1335  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
1336
1337  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
1338  SmallVector<SDOperand, 8> MemOpChains;
1339
1340  SDOperand StackPtr;
1341
1342  // Walk the register/memloc assignments, inserting copies/loads.
1343  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1344    CCValAssign &VA = ArgLocs[i];
1345    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
1346
1347    // Promote the value if needed.
1348    switch (VA.getLocInfo()) {
1349      default: assert(0 && "Unknown loc info!");
1350      case CCValAssign::Full: break;
1351      case CCValAssign::SExt:
1352        Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
1353        break;
1354      case CCValAssign::ZExt:
1355        Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
1356        break;
1357      case CCValAssign::AExt:
1358        Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
1359        break;
1360    }
1361
1362    if (VA.isRegLoc()) {
1363      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1364    } else {
1365      assert(VA.isMemLoc());
1366      if (StackPtr.Val == 0)
1367        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
1368
1369      MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
1370                                             Arg));
1371    }
1372  }
1373
1374  if (!MemOpChains.empty())
1375    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
1376                        &MemOpChains[0], MemOpChains.size());
1377
1378  // Build a sequence of copy-to-reg nodes chained together with token chain
1379  // and flag operands which copy the outgoing args into registers.
1380  SDOperand InFlag;
1381  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1382    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
1383                             InFlag);
1384    InFlag = Chain.getValue(1);
1385  }
1386
1387  // If the callee is a GlobalAddress node (quite common, every direct call is)
1388  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1389  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1390    // We should use extra load for direct calls to dllimported functions in
1391    // non-JIT mode.
1392    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
1393                                        getTargetMachine(), true))
1394      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
1395  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
1396    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
1397
1398  // ELF / PIC requires the GOT pointer to be in the EBX register before calls
1399  // to functions made via the PLT.
1400  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1401      Subtarget->isPICStyleGOT()) {
1402    Chain = DAG.getCopyToReg(Chain, X86::EBX,
1403                             DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
1404                             InFlag);
1405    InFlag = Chain.getValue(1);
1406  }
1407
1408  // Returns a chain & a flag for retval copy to use.
1409  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
1410  SmallVector<SDOperand, 8> Ops;
1411  Ops.push_back(Chain);
1412  Ops.push_back(Callee);
1413
1414  // Add argument registers to the end of the list so that they are known live
1415  // into the call.
1416  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1417    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1418                                  RegsToPass[i].second.getValueType()));
1419
1420  // Add an implicit use of the GOT pointer in EBX.
1421  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1422      Subtarget->isPICStyleGOT())
1423    Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
1424
1425  if (InFlag.Val)
1426    Ops.push_back(InFlag);
1427
1428  assert(!isTailCall && "no tail call here");
1429  Chain = DAG.getNode(X86ISD::CALL,
1430                      NodeTys, &Ops[0], Ops.size());
1431  InFlag = Chain.getValue(1);
1432
1433  // Returns a flag for retval copy to use.
1434  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
1435  Ops.clear();
1436  Ops.push_back(Chain);
1437  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
1438  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
1439  Ops.push_back(InFlag);
1440  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
1441  InFlag = Chain.getValue(1);
1442
1443  // Handle result values, copying them out of physregs into vregs that we
1444  // return.
1445  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
1446}
1447
1448//===----------------------------------------------------------------------===//
1449//                Fast Calling Convention (tail call) implementation
1450//===----------------------------------------------------------------------===//
1451
1452//  Like stdcall, this is a callee-cleanup convention, except that ECX is
1453//  reserved for storing the tail called function address. Only 2 registers are
1454//  free for argument passing (inreg). Tail call optimization is performed
1455//  provided:
1456//                * tailcallopt is enabled
1457//                * caller/callee are fastcc
1458//                * elf/pic is disabled OR
1459//                * elf/pic enabled + callee is in module + callee has
1460//                  visibility protected or hidden
1461//  To keep the stack aligned according to the platform ABI, the function
1462//  GetAlignedArgumentStackSize ensures that the argument delta is always a
1463//  multiple of the stack alignment. (Dynamic linkers such as darwin's dyld need this.)
1464//  If a tail called callee has more arguments than the caller, the caller
1465//  needs to make sure that there is room to move the RETADDR to. This is
1466//  achieved by reserving an area the size of the argument delta right after the
1467//  original RETADDR, but before the saved frame pointer or the spilled registers,
1468//  e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
1469//  stack layout:
1470//    arg1
1471//    arg2
1472//    RETADDR
1473//    [ new RETADDR
1474//      move area ]
1475//    (possible EBP)
1476//    ESI
1477//    EDI
1478//    local1 ..
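//  (Illustrative: if arg3 and arg4 are both i32, the argument delta and thus
//   the reserved move area is 8 bytes on x86-32.)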
1479
1480/// GetAlignedArgumentStackSize - Align the stack size, e.g. to 16n + 12 for a
1481/// 16 byte alignment requirement.
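/// For example (x86-32: 16-byte stack alignment, 4-byte slots): a StackSize of 8
/// becomes 12, 14 becomes 28, and 28 stays 28 - always of the form 16n + 12.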
1482unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
1483                                                        SelectionDAG& DAG) {
1484  if (PerformTailCallOpt) {
1485    MachineFunction &MF = DAG.getMachineFunction();
1486    const TargetMachine &TM = MF.getTarget();
1487    const TargetFrameInfo &TFI = *TM.getFrameInfo();
1488    unsigned StackAlignment = TFI.getStackAlignment();
1489    uint64_t AlignMask = StackAlignment - 1;
1490    int64_t Offset = StackSize;
1491    unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
1492    if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
1493      // The residue is no more than StackAlignment - SlotSize; just add the difference.
1494      Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
1495    } else {
1496      // Mask out the lower bits, then add StackAlignment plus (StackAlignment - SlotSize).
1497      Offset = ((~AlignMask) & Offset) + StackAlignment +
1498        (StackAlignment-SlotSize);
1499    }
1500    StackSize = Offset;
1501  }
1502  return StackSize;
1503}
1504
1505/// IsEligibleForTailCallOptimization - Check whether the call is eligible for
1506/// tail call optimization. A call is eligible if the caller and callee calling
1507/// conventions match (currently only fastcc supports tail calls) and the CALL
1508/// is immediately followed by a RET.
1509bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
1510                                                      SDOperand Ret,
1511                                                      SelectionDAG& DAG) const {
1512  if (!PerformTailCallOpt)
1513    return false;
1514
1515  // Check whether the CALL node immediately precedes the RET node and whether the
1516  // return uses the result of the node or is a void return.
1517  unsigned NumOps = Ret.getNumOperands();
1518  if ((NumOps == 1 &&
1519       (Ret.getOperand(0) == SDOperand(Call.Val,1) ||
1520        Ret.getOperand(0) == SDOperand(Call.Val,0))) ||
1521      (NumOps > 1 &&
1522       Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) &&
1523       Ret.getOperand(1) == SDOperand(Call.Val,0))) {
1524    MachineFunction &MF = DAG.getMachineFunction();
1525    unsigned CallerCC = MF.getFunction()->getCallingConv();
1526    unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
1527    if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
1528      SDOperand Callee = Call.getOperand(4);
1529      // On elf/pic %ebx needs to be livein.
1530      if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
1531          !Subtarget->isPICStyleGOT())
1532        return true;
1533
1534      // Can only do local tail calls with PIC.
1535      GlobalValue *GV = 0;
1536      GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
1537      if (G != 0 &&
1538         (GV = G->getGlobal()) &&
1539         (GV->hasHiddenVisibility() || GV->hasProtectedVisibility()))
1540        return true;
1541    }
1542  }
1543
1544  return false;
1545}
1546
1547SDOperand X86TargetLowering::LowerX86_TailCallTo(SDOperand Op,
1548                                                     SelectionDAG &DAG,
1549                                                     unsigned CC) {
1550  SDOperand Chain     = Op.getOperand(0);
1551  bool isVarArg       = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
1552  bool isTailCall     = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
1553  SDOperand Callee    = Op.getOperand(4);
1554  bool is64Bit        = Subtarget->is64Bit();
1555
1556  assert(isTailCall && PerformTailCallOpt && "Should only emit tail calls.");
1557
1558  // Analyze operands of the call, assigning locations to each operand.
1559  SmallVector<CCValAssign, 16> ArgLocs;
1560  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
1561  if (is64Bit)
1562    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_TailCall);
1563  else
1564    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_TailCall);
1565
1566
1567  // Lower arguments at fp - stackoffset + fpdiff.
1568  MachineFunction &MF = DAG.getMachineFunction();
1569
1570  unsigned NumBytesToBePushed =
1571    GetAlignedArgumentStackSize(CCInfo.getNextStackOffset(), DAG);
1572
1573  unsigned NumBytesCallerPushed =
1574    MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
1575  int FPDiff = NumBytesCallerPushed - NumBytesToBePushed;
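  // FPDiff is negative when the tail callee needs more argument stack than the
  // caller provided, e.g. 12 caller bytes vs. 28 callee bytes gives FPDiff == -16.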
1576
1577  // Record the delta the return address stack slot has to move by, but only
1578  // if this delta (FPDiff) is smaller, i.e. a larger move, than the one recorded so far.
1579  if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
1580    MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
1581
1582  Chain = DAG.
1583   getCALLSEQ_START(Chain, DAG.getConstant(NumBytesToBePushed, getPointerTy()));
1584
1585  // Adjust the Return address stack slot.
1586  SDOperand RetAddrFrIdx, NewRetAddrFrIdx;
1587  if (FPDiff) {
1588    MVT::ValueType VT = is64Bit ? MVT::i64 : MVT::i32;
1589    RetAddrFrIdx = getReturnAddressFrameIndex(DAG);
1590    // Load the "old" Return address.
1591    RetAddrFrIdx =
1592      DAG.getLoad(VT, Chain,RetAddrFrIdx, NULL, 0);
1593    // Calculate the new stack slot for the return address.
1594    int SlotSize = is64Bit ? 8 : 4;
1595    int NewReturnAddrFI =
1596      MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
1597    NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
1598    Chain = SDOperand(RetAddrFrIdx.Val, 1);
1599  }
1600
1601  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
1602  SmallVector<SDOperand, 8> MemOpChains;
1603  SmallVector<SDOperand, 8> MemOpChains2;
1604  SDOperand FramePtr, StackPtr;
1605  SDOperand PtrOff;
1606  SDOperand FIN;
1607  int FI = 0;
1608
1609  // Walk the register/memloc assignments, inserting copies/loads.  Lower the
1610  // arguments first to the stack slots where they would be placed by a normal
1611  // (non-tail) function call.
1612  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1613    CCValAssign &VA = ArgLocs[i];
1614    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
1615
1616    // Promote the value if needed.
1617    switch (VA.getLocInfo()) {
1618    default: assert(0 && "Unknown loc info!");
1619    case CCValAssign::Full: break;
1620    case CCValAssign::SExt:
1621      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
1622      break;
1623    case CCValAssign::ZExt:
1624      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
1625      break;
1626    case CCValAssign::AExt:
1627      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
1628      break;
1629    }
1630
1631    if (VA.isRegLoc()) {
1632      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1633    } else {
1634      assert(VA.isMemLoc());
1635      if (StackPtr.Val == 0)
1636        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
1637
1638      MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
1639                                             Arg));
1640    }
1641  }
1642
1643  if (!MemOpChains.empty())
1644    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
1645                        &MemOpChains[0], MemOpChains.size());
1646
1647  // Build a sequence of copy-to-reg nodes chained together with token chain
1648  // and flag operands which copy the outgoing args into registers.
1649  SDOperand InFlag;
1650  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1651    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
1652                             InFlag);
1653    InFlag = Chain.getValue(1);
1654  }
1655  InFlag = SDOperand();
1656
1657  // Copy arguments from their temporary stack slots to the stack slots of the
1658  // tail called function. This is needed because if we lowered the arguments
1659  // directly to their real stack slots, they might overwrite each other.
1660  // TODO: To make this more efficient (sometimes saving a store/load) we could
1661  // analyse the arguments and emit this store/load/store sequence only for
1662  // arguments which would be overwritten otherwise.
1663  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1664    CCValAssign &VA = ArgLocs[i];
1665    if (!VA.isRegLoc()) {
1666      SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
1667      unsigned Flags    = cast<ConstantSDNode>(FlagsOp)->getValue();
1668
1669      // Get source stack slot.
1670      SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
1671      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
1672      // Create frame index.
1673      int32_t Offset = VA.getLocMemOffset()+FPDiff;
1674      uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8;
1675      FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
1676      FIN = DAG.getFrameIndex(FI, MVT::i32);
1677      if (Flags & ISD::ParamFlags::ByVal) {
1678        // Copy relative to framepointer.
1679        unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
1680                               ISD::ParamFlags::ByValAlignOffs);
1681
1682        unsigned  Size = (Flags & ISD::ParamFlags::ByValSize) >>
1683          ISD::ParamFlags::ByValSizeOffs;
1684
1685        SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
1686        SDOperand  SizeNode = DAG.getConstant(Size, MVT::i32);
1687        SDOperand AlwaysInline = DAG.getConstant(1, MVT::i1);
1688
1689        MemOpChains2.push_back(DAG.getMemcpy(Chain, FIN, PtrOff, SizeNode,
1690                                             AlignNode,AlwaysInline));
1691      } else {
1692        SDOperand LoadedArg = DAG.getLoad(VA.getValVT(), Chain, PtrOff, NULL,0);
1693        // Store relative to framepointer.
1694        MemOpChains2.push_back(DAG.getStore(Chain, LoadedArg, FIN, NULL, 0));
1695      }
1696    }
1697  }
1698
1699  if (!MemOpChains2.empty())
1700    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
1701                        &MemOpChains2[0], MemOpChains2.size());
1702
1703  // Store the return address to the appropriate stack slot.
1704  if (FPDiff)
1705    Chain = DAG.getStore(Chain,RetAddrFrIdx, NewRetAddrFrIdx, NULL, 0);
1706
1707  // ELF / PIC requires the GOT pointer to be in the EBX register before calls
1708  // to functions made via the PLT.
1709  // This does not work with tail calls, since EBX is not restored correctly by
1710  // the tail caller. TODO: holds at least for x86; verify for x86-64.
1711
1712  // If the callee is a GlobalAddress node (quite common, every direct call is)
1713  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1714  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1715    // We should use extra load for direct calls to dllimported functions in
1716    // non-JIT mode.
1717    if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
1718                                        getTargetMachine(), true))
1719      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
1720  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
1721    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
1722  else {
1723    assert(Callee.getOpcode() == ISD::LOAD &&
1724           "Function destination must be loaded into virtual register");
1725    unsigned Opc = is64Bit ? X86::R9 : X86::ECX;
1726
1727    Chain = DAG.getCopyToReg(Chain,
1728                             DAG.getRegister(Opc, getPointerTy()),
1729                             Callee, InFlag);
1730    Callee = DAG.getRegister(Opc, getPointerTy());
1731    // Add register as live out.
1732    DAG.getMachineFunction().addLiveOut(Opc);
1733  }
1734
1735  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
1736  SmallVector<SDOperand, 8> Ops;
1737
1738  Ops.push_back(Chain);
1739  Ops.push_back(DAG.getConstant(NumBytesToBePushed, getPointerTy()));
1740  Ops.push_back(DAG.getConstant(0, getPointerTy()));
1741  if (InFlag.Val)
1742    Ops.push_back(InFlag);
1743  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
1744  InFlag = Chain.getValue(1);
1745
1746  // Returns a chain & a flag for retval copy to use.
1747  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
1748  Ops.clear();
1749  Ops.push_back(Chain);
1750  Ops.push_back(Callee);
1751  Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
1752  // Add argument registers to the end of the list so that they are known live
1753  // into the call.
1754  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1755    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1756                                  RegsToPass[i].second.getValueType()));
1757  if (InFlag.Val)
1758    Ops.push_back(InFlag);
1759  assert(InFlag.Val &&
1760         "Flag must be set. This depends on the flag being set in LowerRET");
1761  Chain = DAG.getNode(X86ISD::TAILCALL,
1762                      Op.Val->getVTList(), &Ops[0], Ops.size());
1763
1764  return SDOperand(Chain.Val, Op.ResNo);
1765}
1766
1767//===----------------------------------------------------------------------===//
1768//                 X86-64 C Calling Convention implementation
1769//===----------------------------------------------------------------------===//
1770
1771SDOperand
1772X86TargetLowering::LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG) {
1773  MachineFunction &MF = DAG.getMachineFunction();
1774  MachineFrameInfo *MFI = MF.getFrameInfo();
1775  SDOperand Root = Op.getOperand(0);
1776  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
1777  unsigned CC = MF.getFunction()->getCallingConv();
1778
1779  static const unsigned GPR64ArgRegs[] = {
1780    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8,  X86::R9
1781  };
1782  static const unsigned XMMArgRegs[] = {
1783    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
1784    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
1785  };
1786
1787
1788  // Assign locations to all of the incoming arguments.
1789  SmallVector<CCValAssign, 16> ArgLocs;
1790  CCState CCInfo(CC, isVarArg,
1791                 getTargetMachine(), ArgLocs);
1792  if (CC == CallingConv::Fast && PerformTailCallOpt)
1793    CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_TailCall);
1794  else
1795    CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_C);
1796
1797  SmallVector<SDOperand, 8> ArgValues;
1798  unsigned LastVal = ~0U;
1799  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1800    CCValAssign &VA = ArgLocs[i];
1801    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
1802    // places.
1803    assert(VA.getValNo() != LastVal &&
1804           "Don't support value assigned to multiple locs yet");
1805    LastVal = VA.getValNo();
1806
1807    if (VA.isRegLoc()) {
1808      MVT::ValueType RegVT = VA.getLocVT();
1809      TargetRegisterClass *RC;
1810      if (RegVT == MVT::i32)
1811        RC = X86::GR32RegisterClass;
1812      else if (RegVT == MVT::i64)
1813        RC = X86::GR64RegisterClass;
1814      else if (RegVT == MVT::f32)
1815        RC = X86::FR32RegisterClass;
1816      else if (RegVT == MVT::f64)
1817        RC = X86::FR64RegisterClass;
1818      else {
1819        assert(MVT::isVector(RegVT));
1820        if (MVT::getSizeInBits(RegVT) == 64) {
1821          RC = X86::GR64RegisterClass;       // MMX values are passed in GPRs.
1822          RegVT = MVT::i64;
1823        } else
1824          RC = X86::VR128RegisterClass;
1825      }
1826
1827      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
1828      SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);
1829
1830      // If this is an 8 or 16-bit value, it is really passed promoted to 32
1831      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
1832      // right size.
1833      if (VA.getLocInfo() == CCValAssign::SExt)
1834        ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
1835                               DAG.getValueType(VA.getValVT()));
1836      else if (VA.getLocInfo() == CCValAssign::ZExt)
1837        ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
1838                               DAG.getValueType(VA.getValVT()));
1839
1840      if (VA.getLocInfo() != CCValAssign::Full)
1841        ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);
1842
1843      // Handle MMX values passed in GPRs.
1844      if (RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass &&
1845          MVT::getSizeInBits(RegVT) == 64)
1846        ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);
1847
1848      ArgValues.push_back(ArgValue);
1849    } else {
1850      assert(VA.isMemLoc());
1851      ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
1852    }
1853  }
1854
1855  unsigned StackSize = CCInfo.getNextStackOffset();
1856  if (CC == CallingConv::Fast)
1857    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
1858
1859  // If the function takes variable number of arguments, make a frame index for
1860  // If the function takes a variable number of arguments, make a frame index for
1861  if (isVarArg) {
1862    assert(CC != CallingConv::Fast &&
1863           "Var arg not supported with calling convention fastcc");
1864    unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
1865    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
1866
1867    // For X86-64, if there are vararg parameters that are passed via
1868    // registers, then we must store them to their spots on the stack so they
1869    // may be loaded by deferencing the result of va_next.
1870    // may be loaded by dereferencing the result of va_next.
1871    VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
1872    VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
1873    RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);
1874
1875    // Store the integer parameter registers.
1876    SmallVector<SDOperand, 8> MemOps;
1877    SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
1878    SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
1879                              DAG.getConstant(VarArgsGPOffset, getPointerTy()));
1880    for (; NumIntRegs != 6; ++NumIntRegs) {
1881      unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
1882                                X86::GR64RegisterClass);
1883      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
1884      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
1885      MemOps.push_back(Store);
1886      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
1887                        DAG.getConstant(8, getPointerTy()));
1888    }
1889
1890    // Now store the XMM (fp + vector) parameter registers.
1891    FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
1892                      DAG.getConstant(VarArgsFPOffset, getPointerTy()));
1893    for (; NumXMMRegs != 8; ++NumXMMRegs) {
1894      unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
1895                                X86::VR128RegisterClass);
1896      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
1897      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
1898      MemOps.push_back(Store);
1899      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
1900                        DAG.getConstant(16, getPointerTy()));
1901    }
1902    if (!MemOps.empty())
1903        Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
1904                           &MemOps[0], MemOps.size());
1905  }
1906
1907  ArgValues.push_back(Root);
1908  // Tail call convention (fastcc) needs callee pop.
1909  if (CC == CallingConv::Fast && PerformTailCallOpt) {
1910    BytesToPopOnReturn = StackSize;  // Callee pops everything.
1911    BytesCallerReserves = 0;
1912  } else {
1913    BytesToPopOnReturn = 0;  // Callee pops nothing.
1914    BytesCallerReserves = StackSize;
1915  }
1916  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
1917  FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);
1918
1919  // Return the new list of results.
1920  return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
1921                     &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
1922}
1923
1924SDOperand
1925X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG,
1926                                        unsigned CC) {
1927  SDOperand Chain     = Op.getOperand(0);
1928  bool isVarArg       = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
1929  SDOperand Callee    = Op.getOperand(4);
1930
1931  // Analyze operands of the call, assigning locations to each operand.
1932  SmallVector<CCValAssign, 16> ArgLocs;
1933  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
1934  if (CC==CallingConv::Fast && PerformTailCallOpt)
1935    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_TailCall);
1936  else
1937    CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_C);
1938
1939  // Get a count of how many bytes are to be pushed on the stack.
1940  unsigned NumBytes = CCInfo.getNextStackOffset();
1941  if (CC == CallingConv::Fast)
1942    NumBytes = GetAlignedArgumentStackSize(NumBytes,DAG);
1943
1944  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
1945
1946  SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
1947  SmallVector<SDOperand, 8> MemOpChains;
1948
1949  SDOperand StackPtr;
1950
1951  // Walk the register/memloc assignments, inserting copies/loads.
1952  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1953    CCValAssign &VA = ArgLocs[i];
1954    SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
1955
1956    // Promote the value if needed.
1957    switch (VA.getLocInfo()) {
1958    default: assert(0 && "Unknown loc info!");
1959    case CCValAssign::Full: break;
1960    case CCValAssign::SExt:
1961      Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
1962      break;
1963    case CCValAssign::ZExt:
1964      Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
1965      break;
1966    case CCValAssign::AExt:
1967      Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
1968      break;
1969    }
1970
1971    if (VA.isRegLoc()) {
1972      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1973    } else {
1974      assert(VA.isMemLoc());
1975      if (StackPtr.Val == 0)
1976        StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
1977
1978      MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
1979                                             Arg));
1980    }
1981  }
1982
1983  if (!MemOpChains.empty())
1984    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
1985                        &MemOpChains[0], MemOpChains.size());
1986
1987  // Build a sequence of copy-to-reg nodes chained together with token chain
1988  // and flag operands which copy the outgoing args into registers.
1989  SDOperand InFlag;
1990  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1991    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
1992                             InFlag);
1993    InFlag = Chain.getValue(1);
1994  }
1995
1996  if (isVarArg) {
1997    assert(CallingConv::Fast != CC &&
1998           "Var args not supported with calling convention fastcc");
1999
2000    // From AMD64 ABI document:
2001    // For calls that may call functions that use varargs or stdargs
2002    // (prototype-less calls or calls to functions containing ellipsis (...) in
2003    // the declaration), %al is used as a hidden argument to specify the number
2004    // of SSE registers used. The contents of %al do not need to match exactly
2005    // the number of registers, but must be an upper bound on the number of SSE
2006    // registers used and is in the range 0 - 8 inclusive.
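    // For example (illustrative): a varargs call that passes two doubles in
    // XMM0 and XMM1 sets %al to 2.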
2007
2008    // Count the number of XMM registers allocated.
2009    static const unsigned XMMArgRegs[] = {
2010      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2011      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2012    };
2013    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
2014
2015    Chain = DAG.getCopyToReg(Chain, X86::AL,
2016                             DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
2017    InFlag = Chain.getValue(1);
2018  }
2019
2020  // If the callee is a GlobalAddress node (quite common, every direct call is)
2021  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
2022  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2023    // We should use extra load for direct calls to dllimported functions in
2024    // non-JIT mode.
2025    if (getTargetMachine().getCodeModel() != CodeModel::Large
2026        && !Subtarget->GVRequiresExtraLoad(G->getGlobal(),
2027                                           getTargetMachine(), true))
2028      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
2029  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
2030    if (getTargetMachine().getCodeModel() != CodeModel::Large)
2031      Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
2032
2033  // Returns a chain & a flag for retval copy to use.
2034  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
2035  SmallVector<SDOperand, 8> Ops;
2036  Ops.push_back(Chain);
2037  Ops.push_back(Callee);
2038
2039  // Add argument registers to the end of the list so that they are known live
2040  // into the call.
2041  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2042    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2043                                  RegsToPass[i].second.getValueType()));
2044
2045  if (InFlag.Val)
2046    Ops.push_back(InFlag);
2047
2048  Chain = DAG.getNode(X86ISD::CALL,
2049                      NodeTys, &Ops[0], Ops.size());
2050  InFlag = Chain.getValue(1);
2051  int NumBytesForCalleeToPush = 0;
2052  if (CC == CallingConv::Fast && PerformTailCallOpt) {
2053    NumBytesForCalleeToPush = NumBytes;  // Callee pops everything.
2054  } else {
2055    NumBytesForCalleeToPush = 0;  // Callee pops nothing.
2056  }
2057  // Returns a flag for retval copy to use.
2058  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
2059  Ops.clear();
2060  Ops.push_back(Chain);
2061  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
2062  Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy()));
2063  Ops.push_back(InFlag);
2064  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
2065  InFlag = Chain.getValue(1);
2066
2067  // Handle result values, copying them out of physregs into vregs that we
2068  // return.
2069  return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
2070}
2071
2072
2073//===----------------------------------------------------------------------===//
2074//                           Other Lowering Hooks
2075//===----------------------------------------------------------------------===//
2076
2077
2078SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
2079  MachineFunction &MF = DAG.getMachineFunction();
2080  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2081  int ReturnAddrIndex = FuncInfo->getRAIndex();
2082
2083  if (ReturnAddrIndex == 0) {
2084    // Set up a frame object for the return address.
2085    if (Subtarget->is64Bit())
2086      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
2087    else
2088      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
2089
2090    FuncInfo->setRAIndex(ReturnAddrIndex);
2091  }
2092
2093  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
2094}
2095
2096
2097
2098/// translateX86CC - Do a one-to-one translation of an ISD::CondCode to the X86
2099/// specific condition code. It returns false if it cannot do a direct
2100/// translation. X86CC is the translated CondCode.  LHS/RHS are modified as
2101/// needed.
2102static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
2103                           unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
2104                           SelectionDAG &DAG) {
2105  X86CC = X86::COND_INVALID;
2106  if (!isFP) {
2107    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
2108      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
2109        // X > -1  -> compare against 0, jump if the sign flag is clear.
2110        RHS = DAG.getConstant(0, RHS.getValueType());
2111        X86CC = X86::COND_NS;
2112        return true;
2113      } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
2114        // X < 0   -> compare against 0, jump if the sign flag is set.
2115        X86CC = X86::COND_S;
2116        return true;
2117      } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
2118        // X < 1   -> X <= 0
2119        RHS = DAG.getConstant(0, RHS.getValueType());
2120        X86CC = X86::COND_LE;
2121        return true;
2122      }
2123    }
2124
2125    switch (SetCCOpcode) {
2126    default: break;
2127    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
2128    case ISD::SETGT:  X86CC = X86::COND_G;  break;
2129    case ISD::SETGE:  X86CC = X86::COND_GE; break;
2130    case ISD::SETLT:  X86CC = X86::COND_L;  break;
2131    case ISD::SETLE:  X86CC = X86::COND_LE; break;
2132    case ISD::SETNE:  X86CC = X86::COND_NE; break;
2133    case ISD::SETULT: X86CC = X86::COND_B;  break;
2134    case ISD::SETUGT: X86CC = X86::COND_A;  break;
2135    case ISD::SETULE: X86CC = X86::COND_BE; break;
2136    case ISD::SETUGE: X86CC = X86::COND_AE; break;
2137    }
2138  } else {
2139    // On a floating point condition, the flags are set as follows:
2140    // ZF  PF  CF   op
2141    //  0 | 0 | 0 | X > Y
2142    //  0 | 0 | 1 | X < Y
2143    //  1 | 0 | 0 | X == Y
2144    //  1 | 1 | 1 | unordered
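    // For example, X86::COND_A (CF == 0 and ZF == 0) matches only X > Y, and in
    // particular rejects unordered operands, which set both CF and ZF.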
2145    bool Flip = false;
2146    switch (SetCCOpcode) {
2147    default: break;
2148    case ISD::SETUEQ:
2149    case ISD::SETEQ: X86CC = X86::COND_E;  break;
2150    case ISD::SETOLT: Flip = true; // Fallthrough
2151    case ISD::SETOGT:
2152    case ISD::SETGT: X86CC = X86::COND_A;  break;
2153    case ISD::SETOLE: Flip = true; // Fallthrough
2154    case ISD::SETOGE:
2155    case ISD::SETGE: X86CC = X86::COND_AE; break;
2156    case ISD::SETUGT: Flip = true; // Fallthrough
2157    case ISD::SETULT:
2158    case ISD::SETLT: X86CC = X86::COND_B;  break;
2159    case ISD::SETUGE: Flip = true; // Fallthrough
2160    case ISD::SETULE:
2161    case ISD::SETLE: X86CC = X86::COND_BE; break;
2162    case ISD::SETONE:
2163    case ISD::SETNE: X86CC = X86::COND_NE; break;
2164    case ISD::SETUO: X86CC = X86::COND_P;  break;
2165    case ISD::SETO:  X86CC = X86::COND_NP; break;
2166    }
2167    if (Flip)
2168      std::swap(LHS, RHS);
2169  }
2170
2171  return X86CC != X86::COND_INVALID;
2172}
2173
2174/// hasFPCMov - is there a floating point cmov for the specific X86 condition
2175/// code. The current x86 ISA includes the following FP cmov instructions:
2176/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
2177static bool hasFPCMov(unsigned X86CC) {
2178  switch (X86CC) {
2179  default:
2180    return false;
2181  case X86::COND_B:
2182  case X86::COND_BE:
2183  case X86::COND_E:
2184  case X86::COND_P:
2185  case X86::COND_A:
2186  case X86::COND_AE:
2187  case X86::COND_NE:
2188  case X86::COND_NP:
2189    return true;
2190  }
2191}
2192
2193/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode.  Return
2194/// true if Op is undef or if its value falls within the specified range [Low, Hi).
2195static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
2196  if (Op.getOpcode() == ISD::UNDEF)
2197    return true;
2198
2199  unsigned Val = cast<ConstantSDNode>(Op)->getValue();
2200  return (Val >= Low && Val < Hi);
2201}
2202
2203/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode.  Return
2204/// true if Op is undef or if its value is equal to the specified value.
2205static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
2206  if (Op.getOpcode() == ISD::UNDEF)
2207    return true;
2208  return cast<ConstantSDNode>(Op)->getValue() == Val;
2209}
2210
2211/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
2212/// specifies a shuffle of elements that is suitable for input to PSHUFD.
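/// For example, the 4-element mask <2, 3, 0, 1> qualifies, since every element
/// refers to the first vector (indices 0-3).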
2213bool X86::isPSHUFDMask(SDNode *N) {
2214  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2215
2216  if (N->getNumOperands() != 2 && N->getNumOperands() != 4)
2217    return false;
2218
2219  // Check that the mask elements don't reference the second vector.
2220  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2221    SDOperand Arg = N->getOperand(i);
2222    if (Arg.getOpcode() == ISD::UNDEF) continue;
2223    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2224    if (cast<ConstantSDNode>(Arg)->getValue() >= e)
2225      return false;
2226  }
2227
2228  return true;
2229}
2230
2231/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
2232/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
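/// For example, <0, 1, 2, 3, 7, 6, 5, 4> qualifies: the low quadword is copied
/// in order and the high elements stay within indices 4-7.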
2233bool X86::isPSHUFHWMask(SDNode *N) {
2234  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2235
2236  if (N->getNumOperands() != 8)
2237    return false;
2238
2239  // Lower quadword copied in order.
2240  for (unsigned i = 0; i != 4; ++i) {
2241    SDOperand Arg = N->getOperand(i);
2242    if (Arg.getOpcode() == ISD::UNDEF) continue;
2243    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2244    if (cast<ConstantSDNode>(Arg)->getValue() != i)
2245      return false;
2246  }
2247
2248  // Upper quadword shuffled.
2249  for (unsigned i = 4; i != 8; ++i) {
2250    SDOperand Arg = N->getOperand(i);
2251    if (Arg.getOpcode() == ISD::UNDEF) continue;
2252    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2253    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2254    if (Val < 4 || Val > 7)
2255      return false;
2256  }
2257
2258  return true;
2259}
2260
2261/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
2262/// specifies a shuffle of elements that is suitable for input to PSHUFLW.
2263bool X86::isPSHUFLWMask(SDNode *N) {
2264  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2265
2266  if (N->getNumOperands() != 8)
2267    return false;
2268
2269  // Upper quadword copied in order.
2270  for (unsigned i = 4; i != 8; ++i)
2271    if (!isUndefOrEqual(N->getOperand(i), i))
2272      return false;
2273
2274  // Lower quadword shuffled.
2275  for (unsigned i = 0; i != 4; ++i)
2276    if (!isUndefOrInRange(N->getOperand(i), 0, 4))
2277      return false;
2278
2279  return true;
2280}
2281
2282/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
2283/// specifies a shuffle of elements that is suitable for input to SHUFP*.
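/// For example, the 4-element mask <0, 1, 4, 5> qualifies: the low half selects
/// from the first vector and the high half from the second.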
2284static bool isSHUFPMask(const SDOperand *Elems, unsigned NumElems) {
2285  if (NumElems != 2 && NumElems != 4) return false;
2286
2287  unsigned Half = NumElems / 2;
2288  for (unsigned i = 0; i < Half; ++i)
2289    if (!isUndefOrInRange(Elems[i], 0, NumElems))
2290      return false;
2291  for (unsigned i = Half; i < NumElems; ++i)
2292    if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2))
2293      return false;
2294
2295  return true;
2296}
2297
2298bool X86::isSHUFPMask(SDNode *N) {
2299  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2300  return ::isSHUFPMask(N->op_begin(), N->getNumOperands());
2301}
2302
2303/// isCommutedSHUFP - Returns true if the shuffle mask is exactly
2304/// the reverse of what x86 shuffles want. x86 shuffles require the lower
2305/// half elements to come from vector 1 (which would equal the dest.) and
2306/// the upper half to come from vector 2.
2307static bool isCommutedSHUFP(const SDOperand *Ops, unsigned NumOps) {
2308  if (NumOps != 2 && NumOps != 4) return false;
2309
2310  unsigned Half = NumOps / 2;
2311  for (unsigned i = 0; i < Half; ++i)
2312    if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2))
2313      return false;
2314  for (unsigned i = Half; i < NumOps; ++i)
2315    if (!isUndefOrInRange(Ops[i], 0, NumOps))
2316      return false;
2317  return true;
2318}
2319
2320static bool isCommutedSHUFP(SDNode *N) {
2321  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2322  return isCommutedSHUFP(N->op_begin(), N->getNumOperands());
2323}
2324
2325/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
2326/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
2327bool X86::isMOVHLPSMask(SDNode *N) {
2328  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2329
2330  if (N->getNumOperands() != 4)
2331    return false;
2332
2333  // Expect element 0 == 6, element 1 == 7, element 2 == 2, element 3 == 3
2334  return isUndefOrEqual(N->getOperand(0), 6) &&
2335         isUndefOrEqual(N->getOperand(1), 7) &&
2336         isUndefOrEqual(N->getOperand(2), 2) &&
2337         isUndefOrEqual(N->getOperand(3), 3);
2338}
2339
2340/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
2341/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
2342/// <2, 3, 2, 3>
2343bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) {
2344  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2345
2346  if (N->getNumOperands() != 4)
2347    return false;
2348
2349  // Expect element 0 == 2, element 1 == 3, element 2 == 2, element 3 == 3
2350  return isUndefOrEqual(N->getOperand(0), 2) &&
2351         isUndefOrEqual(N->getOperand(1), 3) &&
2352         isUndefOrEqual(N->getOperand(2), 2) &&
2353         isUndefOrEqual(N->getOperand(3), 3);
2354}
2355
2356/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
2357/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
2358bool X86::isMOVLPMask(SDNode *N) {
2359  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2360
2361  unsigned NumElems = N->getNumOperands();
2362  if (NumElems != 2 && NumElems != 4)
2363    return false;
2364
2365  for (unsigned i = 0; i < NumElems/2; ++i)
2366    if (!isUndefOrEqual(N->getOperand(i), i + NumElems))
2367      return false;
2368
2369  for (unsigned i = NumElems/2; i < NumElems; ++i)
2370    if (!isUndefOrEqual(N->getOperand(i), i))
2371      return false;
2372
2373  return true;
2374}
2375
2376/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
2377/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
2378/// and MOVLHPS.
2379bool X86::isMOVHPMask(SDNode *N) {
2380  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2381
2382  unsigned NumElems = N->getNumOperands();
2383  if (NumElems != 2 && NumElems != 4)
2384    return false;
2385
2386  for (unsigned i = 0; i < NumElems/2; ++i)
2387    if (!isUndefOrEqual(N->getOperand(i), i))
2388      return false;
2389
2390  for (unsigned i = 0; i < NumElems/2; ++i) {
2391    SDOperand Arg = N->getOperand(i + NumElems/2);
2392    if (!isUndefOrEqual(Arg, i + NumElems))
2393      return false;
2394  }
2395
2396  return true;
2397}
2398
2399/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
2400/// specifies a shuffle of elements that is suitable for input to UNPCKL.
2401static bool isUNPCKLMask(const SDOperand *Elts, unsigned NumElts,
2402                         bool V2IsSplat = false) {
2403  if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
2404    return false;
2405
2406  for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
2407    SDOperand BitI  = Elts[i];
2408    SDOperand BitI1 = Elts[i+1];
2409    if (!isUndefOrEqual(BitI, j))
2410      return false;
2411    if (V2IsSplat) {
2412      if (isUndefOrEqual(BitI1, NumElts))
2413        return false;
2414    } else {
2415      if (!isUndefOrEqual(BitI1, j + NumElts))
2416        return false;
2417    }
2418  }
2419
2420  return true;
2421}
2422
2423bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) {
2424  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2425  return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
2426}
2427
2428/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
2429/// specifies a shuffle of elements that is suitable for input to UNPCKH.
2430static bool isUNPCKHMask(const SDOperand *Elts, unsigned NumElts,
2431                         bool V2IsSplat = false) {
2432  if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
2433    return false;
2434
2435  for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
2436    SDOperand BitI  = Elts[i];
2437    SDOperand BitI1 = Elts[i+1];
2438    if (!isUndefOrEqual(BitI, j + NumElts/2))
2439      return false;
2440    if (V2IsSplat) {
2441      if (isUndefOrEqual(BitI1, NumElts))
2442        return false;
2443    } else {
2444      if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts))
2445        return false;
2446    }
2447  }
2448
2449  return true;
2450}
2451
2452bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) {
2453  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2454  return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
2455}
2456
2457/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
2458/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
2459/// <0, 0, 1, 1>
2460bool X86::isUNPCKL_v_undef_Mask(SDNode *N) {
2461  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2462
2463  unsigned NumElems = N->getNumOperands();
2464  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
2465    return false;
2466
2467  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
2468    SDOperand BitI  = N->getOperand(i);
2469    SDOperand BitI1 = N->getOperand(i+1);
2470
2471    if (!isUndefOrEqual(BitI, j))
2472      return false;
2473    if (!isUndefOrEqual(BitI1, j))
2474      return false;
2475  }
2476
2477  return true;
2478}
2479
2480/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
2481/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
2482/// <2, 2, 3, 3>
2483bool X86::isUNPCKH_v_undef_Mask(SDNode *N) {
2484  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2485
2486  unsigned NumElems = N->getNumOperands();
2487  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
2488    return false;
2489
2490  for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
2491    SDOperand BitI  = N->getOperand(i);
2492    SDOperand BitI1 = N->getOperand(i + 1);
2493
2494    if (!isUndefOrEqual(BitI, j))
2495      return false;
2496    if (!isUndefOrEqual(BitI1, j))
2497      return false;
2498  }
2499
2500  return true;
2501}
2502
2503/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
2504/// specifies a shuffle of elements that is suitable for input to MOVSS,
2505/// MOVSD, and MOVD, i.e. setting the lowest element.
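/// For example, <4, 1, 2, 3> qualifies for 4 elements: element 0 comes from the
/// second vector and the remaining elements come from the first in order.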
2506static bool isMOVLMask(const SDOperand *Elts, unsigned NumElts) {
2507  if (NumElts != 2 && NumElts != 4)
2508    return false;
2509
2510  if (!isUndefOrEqual(Elts[0], NumElts))
2511    return false;
2512
2513  for (unsigned i = 1; i < NumElts; ++i) {
2514    if (!isUndefOrEqual(Elts[i], i))
2515      return false;
2516  }
2517
2518  return true;
2519}
2520
2521bool X86::isMOVLMask(SDNode *N) {
2522  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2523  return ::isMOVLMask(N->op_begin(), N->getNumOperands());
2524}
2525
2526/// isCommutedMOVL - Returns true if the shuffle mask is the reverse of what
2527/// x86 movss wants. X86 movss requires the lowest element to be the lowest
2528/// element of vector 2 and the other elements to come from vector 1 in order.
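/// e.g. <0, 5, 6, 7> for a 4-element shuffle.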
2529static bool isCommutedMOVL(const SDOperand *Ops, unsigned NumOps,
2530                           bool V2IsSplat = false,
2531                           bool V2IsUndef = false) {
2532  if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
2533    return false;
2534
2535  if (!isUndefOrEqual(Ops[0], 0))
2536    return false;
2537
2538  for (unsigned i = 1; i < NumOps; ++i) {
2539    SDOperand Arg = Ops[i];
2540    if (!(isUndefOrEqual(Arg, i+NumOps) ||
2541          (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) ||
2542          (V2IsSplat && isUndefOrEqual(Arg, NumOps))))
2543      return false;
2544  }
2545
2546  return true;
2547}
2548
2549static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
2550                           bool V2IsUndef = false) {
2551  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2552  return isCommutedMOVL(N->op_begin(), N->getNumOperands(),
2553                        V2IsSplat, V2IsUndef);
2554}
2555
2556/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2557/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
2558bool X86::isMOVSHDUPMask(SDNode *N) {
2559  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2560
2561  if (N->getNumOperands() != 4)
2562    return false;
2563
2564  // Expect 1, 1, 3, 3
2565  for (unsigned i = 0; i < 2; ++i) {
2566    SDOperand Arg = N->getOperand(i);
2567    if (Arg.getOpcode() == ISD::UNDEF) continue;
2568    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2569    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2570    if (Val != 1) return false;
2571  }
2572
2573  bool HasHi = false;
2574  for (unsigned i = 2; i < 4; ++i) {
2575    SDOperand Arg = N->getOperand(i);
2576    if (Arg.getOpcode() == ISD::UNDEF) continue;
2577    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2578    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2579    if (Val != 3) return false;
2580    HasHi = true;
2581  }
2582
2583  // Don't use movshdup if it can be done with a shufps.
2584  return HasHi;
2585}
2586
2587/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2588/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
2589bool X86::isMOVSLDUPMask(SDNode *N) {
2590  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2591
2592  if (N->getNumOperands() != 4)
2593    return false;
2594
2595  // Expect 0, 0, 2, 2
2596  for (unsigned i = 0; i < 2; ++i) {
2597    SDOperand Arg = N->getOperand(i);
2598    if (Arg.getOpcode() == ISD::UNDEF) continue;
2599    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2600    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2601    if (Val != 0) return false;
2602  }
2603
2604  bool HasHi = false;
2605  for (unsigned i = 2; i < 4; ++i) {
2606    SDOperand Arg = N->getOperand(i);
2607    if (Arg.getOpcode() == ISD::UNDEF) continue;
2608    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2609    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2610    if (Val != 2) return false;
2611    HasHi = true;
2612  }
2613
2614  // Don't use movsldup if it can be done with a shufps.
2615  return HasHi;
2616}
2617
2618/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
2619/// specifies an identity operation on the LHS or RHS.
2620static bool isIdentityMask(SDNode *N, bool RHS = false) {
2621  unsigned NumElems = N->getNumOperands();
2622  for (unsigned i = 0; i < NumElems; ++i)
2623    if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
2624      return false;
2625  return true;
2626}
2627
2628/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
2629/// a splat of a single element.
2630static bool isSplatMask(SDNode *N) {
2631  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2632
2633  // This is a splat operation if each element of the permute is the same, and
2634  // if the value doesn't reference the second vector.
2635  unsigned NumElems = N->getNumOperands();
2636  SDOperand ElementBase;
2637  unsigned i = 0;
2638  for (; i != NumElems; ++i) {
2639    SDOperand Elt = N->getOperand(i);
2640    if (isa<ConstantSDNode>(Elt)) {
2641      ElementBase = Elt;
2642      break;
2643    }
2644  }
2645
2646  if (!ElementBase.Val)
2647    return false;
2648
2649  for (; i != NumElems; ++i) {
2650    SDOperand Arg = N->getOperand(i);
2651    if (Arg.getOpcode() == ISD::UNDEF) continue;
2652    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2653    if (Arg != ElementBase) return false;
2654  }
2655
2656  // Make sure it is a splat of the first vector operand.
2657  return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
2658}
2659
2660/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
2661/// a splat of a single element and it's a 2 or 4 element mask.
2662bool X86::isSplatMask(SDNode *N) {
2663  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2664
2665  // We can only splat 64-bit and 32-bit quantities with a single instruction.
2666  if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
2667    return false;
2668  return ::isSplatMask(N);
2669}
2670
2671/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
2672/// specifies a splat of element zero.
2673bool X86::isSplatLoMask(SDNode *N) {
2674  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2675
2676  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
2677    if (!isUndefOrEqual(N->getOperand(i), 0))
2678      return false;
2679  return true;
2680}
2681
2682/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
2683/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
2684/// instructions.
2685unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
2686  unsigned NumOperands = N->getNumOperands();
2687  unsigned Shift = (NumOperands == 4) ? 2 : 1;
2688  unsigned Mask = 0;
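  // Walk the mask from the last element to the first, packing each index into
  // Shift bits (entries that refer to the second vector are first reduced by
  // NumOperands); e.g. a 4-element mask <a, b, c, d> yields the immediate
  // (d<<6)|(c<<4)|(b<<2)|a, with bits [1:0] selecting the source of element 0.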
2689  for (unsigned i = 0; i < NumOperands; ++i) {
2690    unsigned Val = 0;
2691    SDOperand Arg = N->getOperand(NumOperands-i-1);
2692    if (Arg.getOpcode() != ISD::UNDEF)
2693      Val = cast<ConstantSDNode>(Arg)->getValue();
2694    if (Val >= NumOperands) Val -= NumOperands;
2695    Mask |= Val;
2696    if (i != NumOperands - 1)
2697      Mask <<= Shift;
2698  }
2699
2700  return Mask;
2701}
2702
2703/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
2704/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
2705/// instructions.
2706unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
2707  unsigned Mask = 0;
2708  // 8 nodes, but we only care about the last 4.
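  // Rebase indices 4..7 down to 0..3 and pack them two bits apiece, so bits
  // [1:0] of the immediate select the source of result element 4.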
2709  for (unsigned i = 7; i >= 4; --i) {
2710    unsigned Val = 0;
2711    SDOperand Arg = N->getOperand(i);
2712    if (Arg.getOpcode() != ISD::UNDEF)
2713      Val = cast<ConstantSDNode>(Arg)->getValue();
2714    Mask |= (Val - 4);
2715    if (i != 4)
2716      Mask <<= 2;
2717  }
2718
2719  return Mask;
2720}
2721
2722/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
2723/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
2724/// instructions.
2725unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
2726  unsigned Mask = 0;
2727  // 8 nodes, but we only care about the first 4.
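  // Pack the low four indices two bits apiece, so bits [1:0] of the immediate
  // select the source of result element 0.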
2728  for (int i = 3; i >= 0; --i) {
2729    unsigned Val = 0;
2730    SDOperand Arg = N->getOperand(i);
2731    if (Arg.getOpcode() != ISD::UNDEF)
2732      Val = cast<ConstantSDNode>(Arg)->getValue();
2733    Mask |= Val;
2734    if (i != 0)
2735      Mask <<= 2;
2736  }
2737
2738  return Mask;
2739}
2740
2741/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
2742/// specifies an 8 element shuffle that can be broken into a pair of
2743/// PSHUFHW and PSHUFLW.
2744static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
2745  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2746
2747  if (N->getNumOperands() != 8)
2748    return false;
2749
2750  // Lower quadword shuffled.
2751  for (unsigned i = 0; i != 4; ++i) {
2752    SDOperand Arg = N->getOperand(i);
2753    if (Arg.getOpcode() == ISD::UNDEF) continue;
2754    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2755    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2756    if (Val >= 4)
2757      return false;
2758  }
2759
2760  // Upper quadword shuffled.
2761  for (unsigned i = 4; i != 8; ++i) {
2762    SDOperand Arg = N->getOperand(i);
2763    if (Arg.getOpcode() == ISD::UNDEF) continue;
2764    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2765    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2766    if (Val < 4 || Val > 7)
2767      return false;
2768  }
2769
2770  return true;
2771}
2772
2773/// CommuteVectorShuffle - Swap vector_shuffle operands as well as
2774/// values in their permute mask.
2775static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1,
2776                                      SDOperand &V2, SDOperand &Mask,
2777                                      SelectionDAG &DAG) {
2778  MVT::ValueType VT = Op.getValueType();
2779  MVT::ValueType MaskVT = Mask.getValueType();
2780  MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
2781  unsigned NumElems = Mask.getNumOperands();
2782  SmallVector<SDOperand, 8> MaskVec;
2783
2784  for (unsigned i = 0; i != NumElems; ++i) {
2785    SDOperand Arg = Mask.getOperand(i);
2786    if (Arg.getOpcode() == ISD::UNDEF) {
2787      MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
2788      continue;
2789    }
2790    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2791    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2792    if (Val < NumElems)
2793      MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
2794    else
2795      MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
2796  }
2797
2798  std::swap(V1, V2);
2799  Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems);
2800  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
2801}
2802
2803/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
2804/// the two vector operands have swapped position.
2805static
2806SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) {
2807  MVT::ValueType MaskVT = Mask.getValueType();
2808  MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
2809  unsigned NumElems = Mask.getNumOperands();
2810  SmallVector<SDOperand, 8> MaskVec;
2811  for (unsigned i = 0; i != NumElems; ++i) {
2812    SDOperand Arg = Mask.getOperand(i);
2813    if (Arg.getOpcode() == ISD::UNDEF) {
2814      MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
2815      continue;
2816    }
2817    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2818    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2819    if (Val < NumElems)
2820      MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
2821    else
2822      MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
2823  }
2824  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems);
2825}
2826
2827
2828/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
2829/// match movhlps. The lower half elements should come from upper half of
2830/// V1 (and in order), and the upper half elements should come from the upper
2831/// half of V2 (and in order).
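/// e.g. a mask of <2, 3, 6, 7> for a 4-element shuffle.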
2832static bool ShouldXformToMOVHLPS(SDNode *Mask) {
2833  unsigned NumElems = Mask->getNumOperands();
2834  if (NumElems != 4)
2835    return false;
2836  for (unsigned i = 0, e = 2; i != e; ++i)
2837    if (!isUndefOrEqual(Mask->getOperand(i), i+2))
2838      return false;
2839  for (unsigned i = 2; i != 4; ++i)
2840    if (!isUndefOrEqual(Mask->getOperand(i), i+4))
2841      return false;
2842  return true;
2843}
2844
2845/// isScalarLoadToVector - Returns true if the node is a scalar load that
2846/// is promoted to a vector.
2847static inline bool isScalarLoadToVector(SDNode *N) {
2848  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
2849    N = N->getOperand(0).Val;
2850    return ISD::isNON_EXTLoad(N);
2851  }
2852  return false;
2853}
2854
2855/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
2856/// match movlp{s|d}. The lower half elements should come from lower half of
2857/// V1 (and in order), and the upper half elements should come from the upper
2858/// half of V2 (and in order). And since V1 will become the source of the
2859/// MOVLP, it must be either a vector load or a scalar load to vector.
2860static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
2861  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
2862    return false;
2863  // If V2 is a vector load, don't do this transformation. We will try to use
2864  // a load-folding shufps op instead.
2865  if (ISD::isNON_EXTLoad(V2))
2866    return false;
2867
2868  unsigned NumElems = Mask->getNumOperands();
2869  if (NumElems != 2 && NumElems != 4)
2870    return false;
2871  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
2872    if (!isUndefOrEqual(Mask->getOperand(i), i))
2873      return false;
2874  for (unsigned i = NumElems/2; i != NumElems; ++i)
2875    if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
2876      return false;
2877  return true;
2878}
2879
2880/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
2881/// all the same.
2882static bool isSplatVector(SDNode *N) {
2883  if (N->getOpcode() != ISD::BUILD_VECTOR)
2884    return false;
2885
2886  SDOperand SplatValue = N->getOperand(0);
2887  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
2888    if (N->getOperand(i) != SplatValue)
2889      return false;
2890  return true;
2891}
2892
2893/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
2894/// to an undef.
2895static bool isUndefShuffle(SDNode *N) {
2896  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
2897    return false;
2898
2899  SDOperand V1 = N->getOperand(0);
2900  SDOperand V2 = N->getOperand(1);
2901  SDOperand Mask = N->getOperand(2);
2902  unsigned NumElems = Mask.getNumOperands();
2903  for (unsigned i = 0; i != NumElems; ++i) {
2904    SDOperand Arg = Mask.getOperand(i);
2905    if (Arg.getOpcode() != ISD::UNDEF) {
2906      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2907      if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
2908        return false;
2909      else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
2910        return false;
2911    }
2912  }
2913  return true;
2914}
2915
2916/// isZeroNode - Returns true if Elt is a constant zero or a floating point
2917/// constant +0.0.
2918static inline bool isZeroNode(SDOperand Elt) {
2919  return ((isa<ConstantSDNode>(Elt) &&
2920           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
2921          (isa<ConstantFPSDNode>(Elt) &&
2922           cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
2923}
2924
2925/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
2926/// to a zero vector.
2927static bool isZeroShuffle(SDNode *N) {
2928  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
2929    return false;
2930
2931  SDOperand V1 = N->getOperand(0);
2932  SDOperand V2 = N->getOperand(1);
2933  SDOperand Mask = N->getOperand(2);
2934  unsigned NumElems = Mask.getNumOperands();
2935  for (unsigned i = 0; i != NumElems; ++i) {
2936    SDOperand Arg = Mask.getOperand(i);
2937    if (Arg.getOpcode() == ISD::UNDEF)
2938      continue;
2939
2940    unsigned Idx = cast<ConstantSDNode>(Arg)->getValue();
2941    if (Idx < NumElems) {
2942      unsigned Opc = V1.Val->getOpcode();
2943      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.Val))
2944        continue;
2945      if (Opc != ISD::BUILD_VECTOR ||
2946          !isZeroNode(V1.Val->getOperand(Idx)))
2947        return false;
2948    } else if (Idx >= NumElems) {
2949      unsigned Opc = V2.Val->getOpcode();
2950      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.Val))
2951        continue;
2952      if (Opc != ISD::BUILD_VECTOR ||
2953          !isZeroNode(V2.Val->getOperand(Idx - NumElems)))
2954        return false;
2955    }
2956  }
2957  return true;
2958}
2959
2960/// getZeroVector - Returns a vector of specified type with all zero elements.
2961///
2962static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
2963  assert(MVT::isVector(VT) && "Expected a vector type");
2964
2965  // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest
2966  // type.  This ensures they get CSE'd.
2967  SDOperand Cst = DAG.getTargetConstant(0, MVT::i32);
2968  SDOperand Vec;
2969  if (MVT::getSizeInBits(VT) == 64)  // MMX
2970    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
2971  else                                              // SSE
2972    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
2973  return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
2974}
2975
2976/// getOnesVector - Returns a vector of specified type with all bits set.
2977///
2978static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) {
2979  assert(MVT::isVector(VT) && "Expected a vector type");
2980
2981  // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest
2982  // type.  This ensures they get CSE'd.
2983  SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32);
2984  SDOperand Vec;
2985  if (MVT::getSizeInBits(VT) == 64)  // MMX
2986    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
2987  else                                              // SSE
2988    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
2989  return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
2990}
2991
2992
2993/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
2994/// that point to V2 point to its first element.
2995static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) {
2996  assert(Mask.getOpcode() == ISD::BUILD_VECTOR);
2997
2998  bool Changed = false;
2999  SmallVector<SDOperand, 8> MaskVec;
3000  unsigned NumElems = Mask.getNumOperands();
3001  for (unsigned i = 0; i != NumElems; ++i) {
3002    SDOperand Arg = Mask.getOperand(i);
3003    if (Arg.getOpcode() != ISD::UNDEF) {
3004      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
3005      if (Val > NumElems) {
3006        Arg = DAG.getConstant(NumElems, Arg.getValueType());
3007        Changed = true;
3008      }
3009    }
3010    MaskVec.push_back(Arg);
3011  }
3012
3013  if (Changed)
3014    Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(),
3015                       &MaskVec[0], MaskVec.size());
3016  return Mask;
3017}
3018
3019/// getMOVLMask - Returns a vector_shuffle mask for a movs{s|d} or movd
3020/// operation of specified width.
3021static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
3022  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
3023  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
3024
3025  SmallVector<SDOperand, 8> MaskVec;
3026  MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
3027  for (unsigned i = 1; i != NumElems; ++i)
3028    MaskVec.push_back(DAG.getConstant(i, BaseVT));
3029  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
3030}
3031
3032/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
3033/// of specified width.
3034static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
3035  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
3036  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
3037  SmallVector<SDOperand, 8> MaskVec;
3038  for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
3039    MaskVec.push_back(DAG.getConstant(i,            BaseVT));
3040    MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
3041  }
3042  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
3043}
3044
3045/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
3046/// of specified width.
3047static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
3048  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
3049  MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
3050  unsigned Half = NumElems/2;
3051  SmallVector<SDOperand, 8> MaskVec;
3052  for (unsigned i = 0; i != Half; ++i) {
3053    MaskVec.push_back(DAG.getConstant(i + Half,            BaseVT));
3054    MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
3055  }
3056  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
3057}
3058
3059/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32.
3060///
3061static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) {
3062  SDOperand V1 = Op.getOperand(0);
3063  SDOperand Mask = Op.getOperand(2);
3064  MVT::ValueType VT = Op.getValueType();
3065  unsigned NumElems = Mask.getNumOperands();
3066  Mask = getUnpacklMask(NumElems, DAG);
3067  while (NumElems != 4) {
3068    V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
3069    NumElems >>= 1;
3070  }
3071  V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1);
3072
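  // A zero vector used as a shuffle mask selects element 0 for every lane,
  // i.e. the v4i32 shuffle below is a splat of the low 32-bit element.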
3073  Mask = getZeroVector(MVT::v4i32, DAG);
3074  SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1,
3075                                  DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask);
3076  return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
3077}
3078
3079/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
3080/// vector and a zero or undef vector.  This produces a shuffle where the low
3081/// element of V2 is swizzled into the zero/undef vector, landing at element
3082/// Idx.  This produces a shuffle mask like 4,1,2,3 (idx=0) or  0,1,2,4 (idx=3).
3083static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT,
3084                                             unsigned NumElems, unsigned Idx,
3085                                             bool isZero, SelectionDAG &DAG) {
3086  SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
3087  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
3088  MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
3089  SmallVector<SDOperand, 16> MaskVec;
3090  for (unsigned i = 0; i != NumElems; ++i)
3091    if (i == Idx)  // If this is the insertion idx, put the low elt of V2 here.
3092      MaskVec.push_back(DAG.getConstant(NumElems, EVT));
3093    else
3094      MaskVec.push_back(DAG.getConstant(i, EVT));
3095  SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3096                               &MaskVec[0], MaskVec.size());
3097  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
3098}
3099
3100/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
3101///
3102static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros,
3103                                       unsigned NumNonZero, unsigned NumZero,
3104                                       SelectionDAG &DAG, TargetLowering &TLI) {
3105  if (NumNonZero > 8)
3106    return SDOperand();
3107
3108  SDOperand V(0, 0);
3109  bool First = true;
3110  for (unsigned i = 0; i < 16; ++i) {
3111    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
3112    if (ThisIsNonZero && First) {
3113      if (NumZero)
3114        V = getZeroVector(MVT::v8i16, DAG);
3115      else
3116        V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
3117      First = false;
3118    }
3119
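    // Bytes are inserted in pairs: on each odd index, merge element i-1 (low
    // byte) with element i (high byte, shifted left by 8) into a single i16
    // and insert it into word slot i/2 (one pinsrw per pair).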
3120    if ((i & 1) != 0) {
3121      SDOperand ThisElt(0, 0), LastElt(0, 0);
3122      bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
3123      if (LastIsNonZero) {
3124        LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1));
3125      }
3126      if (ThisIsNonZero) {
3127        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i));
3128        ThisElt = DAG.getNode(ISD::SHL, MVT::i16,
3129                              ThisElt, DAG.getConstant(8, MVT::i8));
3130        if (LastIsNonZero)
3131          ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt);
3132      } else
3133        ThisElt = LastElt;
3134
3135      if (ThisElt.Val)
3136        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt,
3137                        DAG.getConstant(i/2, TLI.getPointerTy()));
3138    }
3139  }
3140
3141  return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V);
3142}
3143
3144/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
3145///
3146static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros,
3147                                       unsigned NumNonZero, unsigned NumZero,
3148                                       SelectionDAG &DAG, TargetLowering &TLI) {
3149  if (NumNonZero > 4)
3150    return SDOperand();
3151
3152  SDOperand V(0, 0);
3153  bool First = true;
3154  for (unsigned i = 0; i < 8; ++i) {
3155    bool isNonZero = (NonZeros & (1 << i)) != 0;
3156    if (isNonZero) {
3157      if (First) {
3158        if (NumZero)
3159          V = getZeroVector(MVT::v8i16, DAG);
3160        else
3161          V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
3162        First = false;
3163      }
3164      V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i),
3165                      DAG.getConstant(i, TLI.getPointerTy()));
3166    }
3167  }
3168
3169  return V;
3170}
3171
3172SDOperand
3173X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
3174  // All zero's are handled with pxor, all one's are handled with pcmpeqd.
3175  if (ISD::isBuildVectorAllZeros(Op.Val) || ISD::isBuildVectorAllOnes(Op.Val)) {
3176    // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to
3177    // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are
3178    // eliminated on x86-32 hosts.
3179    if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32)
3180      return Op;
3181
3182    if (ISD::isBuildVectorAllOnes(Op.Val))
3183      return getOnesVector(Op.getValueType(), DAG);
3184    return getZeroVector(Op.getValueType(), DAG);
3185  }
3186
3187  MVT::ValueType VT = Op.getValueType();
3188  MVT::ValueType EVT = MVT::getVectorElementType(VT);
3189  unsigned EVTBits = MVT::getSizeInBits(EVT);
3190
3191  unsigned NumElems = Op.getNumOperands();
3192  unsigned NumZero  = 0;
3193  unsigned NumNonZero = 0;
3194  unsigned NonZeros = 0;
3195  bool HasNonImms = false;
3196  SmallSet<SDOperand, 8> Values;
3197  for (unsigned i = 0; i < NumElems; ++i) {
3198    SDOperand Elt = Op.getOperand(i);
3199    if (Elt.getOpcode() == ISD::UNDEF)
3200      continue;
3201    Values.insert(Elt);
3202    if (Elt.getOpcode() != ISD::Constant &&
3203        Elt.getOpcode() != ISD::ConstantFP)
3204      HasNonImms = true;
3205    if (isZeroNode(Elt))
3206      NumZero++;
3207    else {
3208      NonZeros |= (1 << i);
3209      NumNonZero++;
3210    }
3211  }
3212
3213  if (NumNonZero == 0) {
3214    // All undef vector. Return an UNDEF.  All zero vectors were handled above.
3215    return DAG.getNode(ISD::UNDEF, VT);
3216  }
3217
3218  // Splat is obviously ok. Let legalizer expand it to a shuffle.
3219  if (Values.size() == 1)
3220    return SDOperand();
3221
3222  // Special case for single non-zero element.
3223  if (NumNonZero == 1 && NumElems <= 4) {
3224    unsigned Idx = CountTrailingZeros_32(NonZeros);
3225    SDOperand Item = Op.getOperand(Idx);
3226    Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);
3227    if (Idx == 0)
3228      // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
3229      return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx,
3230                                         NumZero > 0, DAG);
3231    else if (!HasNonImms) // Otherwise, it's better to do a constpool load.
3232      return SDOperand();
3233
3234    if (EVTBits == 32) {
3235      // Turn it into a shuffle of zero and zero-extended scalar to vector.
3236      Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0,
3237                                         DAG);
3238      MVT::ValueType MaskVT  = MVT::getIntVectorWithNumElements(NumElems);
3239      MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
3240      SmallVector<SDOperand, 8> MaskVec;
3241      for (unsigned i = 0; i < NumElems; i++)
3242        MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
3243      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3244                                   &MaskVec[0], MaskVec.size());
3245      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item,
3246                         DAG.getNode(ISD::UNDEF, VT), Mask);
3247    }
3248  }
3249
3250  // A vector full of immediates; various special cases are already
3251  // handled, so this is best done with a single constant-pool load.
3252  if (!HasNonImms)
3253    return SDOperand();
3254
3255  // Let legalizer expand 2-wide build_vectors.
3256  if (EVTBits == 64)
3257    return SDOperand();
3258
3259  // If element VT is < 32 bits, convert it to inserts into a zero vector.
3260  if (EVTBits == 8 && NumElems == 16) {
3261    SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
3262                                        *this);
3263    if (V.Val) return V;
3264  }
3265
3266  if (EVTBits == 16 && NumElems == 8) {
3267    SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
3268                                        *this);
3269    if (V.Val) return V;
3270  }
3271
3272  // If element VT is == 32 bits, turn it into a number of shuffles.
3273  SmallVector<SDOperand, 8> V;
3274  V.resize(NumElems);
3275  if (NumElems == 4 && NumZero > 0) {
3276    for (unsigned i = 0; i < 4; ++i) {
3277      bool isZero = !(NonZeros & (1 << i));
3278      if (isZero)
3279        V[i] = getZeroVector(VT, DAG);
3280      else
3281        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
3282    }
3283
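    // Combine the scalars pairwise. For each pair, the two NonZeros bits pick
    // the strategy: both zero reuses the zero vector, exactly one nonzero
    // merges it into the zero vector with a MOVL, and both nonzero uses an
    // unpckl.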
3284    for (unsigned i = 0; i < 2; ++i) {
3285      switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
3286        default: break;
3287        case 0:
3288          V[i] = V[i*2];  // Must be a zero vector.
3289          break;
3290        case 1:
3291          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2],
3292                             getMOVLMask(NumElems, DAG));
3293          break;
3294        case 2:
3295          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
3296                             getMOVLMask(NumElems, DAG));
3297          break;
3298        case 3:
3299          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
3300                             getUnpacklMask(NumElems, DAG));
3301          break;
3302      }
3303    }
3304
3305    // Take advantage of the fact that GR32 to VR128 scalar_to_vector (i.e. movd)
3306    // clears the upper bits.
3307    // FIXME: we can do the same for v4f32 case when we know both parts of
3308    // the lower half come from scalar_to_vector (loadf32). We should do
3309    // that in post legalizer dag combiner with target specific hooks.
3310    if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0)
3311      return V[0];
3312    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
3313    MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
3314    SmallVector<SDOperand, 8> MaskVec;
3315    bool Reverse = (NonZeros & 0x3) == 2;
3316    for (unsigned i = 0; i < 2; ++i)
3317      if (Reverse)
3318        MaskVec.push_back(DAG.getConstant(1-i, EVT));
3319      else
3320        MaskVec.push_back(DAG.getConstant(i, EVT));
3321    Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
3322    for (unsigned i = 0; i < 2; ++i)
3323      if (Reverse)
3324        MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT));
3325      else
3326        MaskVec.push_back(DAG.getConstant(i+NumElems, EVT));
3327    SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3328                                     &MaskVec[0], MaskVec.size());
3329    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask);
3330  }
3331
3332  if (Values.size() > 2) {
3333    // Expand into a number of unpckl*.
3334    // e.g. for v4f32
3335    //   Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
3336    //         : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
3337    //   Step 2: unpcklps X, Y ==>    <3, 2, 1, 0>
3338    SDOperand UnpckMask = getUnpacklMask(NumElems, DAG);
3339    for (unsigned i = 0; i < NumElems; ++i)
3340      V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
3341    NumElems >>= 1;
3342    while (NumElems != 0) {
3343      for (unsigned i = 0; i < NumElems; ++i)
3344        V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems],
3345                           UnpckMask);
3346      NumElems >>= 1;
3347    }
3348    return V[0];
3349  }
3350
3351  return SDOperand();
3352}
3353
3354static
3355SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2,
3356                                   SDOperand PermMask, SelectionDAG &DAG,
3357                                   TargetLowering &TLI) {
3358  SDOperand NewV;
3359  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8);
3360  MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
3361  MVT::ValueType PtrVT = TLI.getPointerTy();
3362  SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(),
3363                                     PermMask.Val->op_end());
3364
3365  // First record which half of which vector the low elements come from.
3366  SmallVector<unsigned, 4> LowQuad(4);
3367  for (unsigned i = 0; i < 4; ++i) {
3368    SDOperand Elt = MaskElts[i];
3369    if (Elt.getOpcode() == ISD::UNDEF)
3370      continue;
3371    unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3372    int QuadIdx = EltIdx / 4;
3373    ++LowQuad[QuadIdx];
3374  }
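  // Pick the quadword (one of the two 64-bit halves of V1 or V2) that supplies
  // the most of the four low result elements; it must supply at least two.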
3375  int BestLowQuad = -1;
3376  unsigned MaxQuad = 1;
3377  for (unsigned i = 0; i < 4; ++i) {
3378    if (LowQuad[i] > MaxQuad) {
3379      BestLowQuad = i;
3380      MaxQuad = LowQuad[i];
3381    }
3382  }
3383
3384  // Record which half of which vector the high elements come from.
3385  SmallVector<unsigned, 4> HighQuad(4);
3386  for (unsigned i = 4; i < 8; ++i) {
3387    SDOperand Elt = MaskElts[i];
3388    if (Elt.getOpcode() == ISD::UNDEF)
3389      continue;
3390    unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3391    int QuadIdx = EltIdx / 4;
3392    ++HighQuad[QuadIdx];
3393  }
3394  int BestHighQuad = -1;
3395  MaxQuad = 1;
3396  for (unsigned i = 0; i < 4; ++i) {
3397    if (HighQuad[i] > MaxQuad) {
3398      BestHighQuad = i;
3399      MaxQuad = HighQuad[i];
3400    }
3401  }
3402
3403  // If it's possible to sort parts of either half with PSHUF{H|L}W, then do it.
3404  if (BestLowQuad != -1 || BestHighQuad != -1) {
3405    // First put the chosen low and high quadwords in place using a shufpd.
3406    SmallVector<SDOperand, 8> MaskVec;
3407    if (BestLowQuad != -1)
3408      MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32));
3409    else
3410      MaskVec.push_back(DAG.getConstant(0, MVT::i32));
3411    if (BestHighQuad != -1)
3412      MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32));
3413    else
3414      MaskVec.push_back(DAG.getConstant(1, MVT::i32));
3415    SDOperand Mask= DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0],2);
3416    NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64,
3417                       DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1),
3418                       DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask);
3419    NewV = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, NewV);
3420
3421    // Now sort high and low parts separately.
3422    BitVector InOrder(8);
3423    if (BestLowQuad != -1) {
3424      // Sort lower half in order using PSHUFLW.
3425      MaskVec.clear();
3426      bool AnyOutOrder = false;
3427      for (unsigned i = 0; i != 4; ++i) {
3428        SDOperand Elt = MaskElts[i];
3429        if (Elt.getOpcode() == ISD::UNDEF) {
3430          MaskVec.push_back(Elt);
3431          InOrder.set(i);
3432        } else {
3433          unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3434          if (EltIdx != i)
3435            AnyOutOrder = true;
3436          MaskVec.push_back(DAG.getConstant(EltIdx % 4, MaskEVT));
3437          // If this element is in the right place after this shuffle, then
3438          // remember it.
3439          if ((int)(EltIdx / 4) == BestLowQuad)
3440            InOrder.set(i);
3441        }
3442      }
3443      if (AnyOutOrder) {
3444        for (unsigned i = 4; i != 8; ++i)
3445          MaskVec.push_back(DAG.getConstant(i, MaskEVT));
3446        SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
3447        NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask);
3448      }
3449    }
3450
3451    if (BestHighQuad != -1) {
3452      // Sort high half in order using PSHUFHW if possible.
3453      MaskVec.clear();
3454      for (unsigned i = 0; i != 4; ++i)
3455        MaskVec.push_back(DAG.getConstant(i, MaskEVT));
3456      bool AnyOutOrder = false;
3457      for (unsigned i = 4; i != 8; ++i) {
3458        SDOperand Elt = MaskElts[i];
3459        if (Elt.getOpcode() == ISD::UNDEF) {
3460          MaskVec.push_back(Elt);
3461          InOrder.set(i);
3462        } else {
3463          unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3464          if (EltIdx != i)
3465            AnyOutOrder = true;
3466          MaskVec.push_back(DAG.getConstant((EltIdx % 4) + 4, MaskEVT));
3467          // If this element is in the right place after this shuffle, then
3468          // remember it.
3469          if ((int)(EltIdx / 4) == BestHighQuad)
3470            InOrder.set(i);
3471        }
3472      }
3473      if (AnyOutOrder) {
3474        SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
3475        NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask);
3476      }
3477    }
3478
3479    // The other elements are put in the right place using pextrw and pinsrw.
3480    for (unsigned i = 0; i != 8; ++i) {
3481      if (InOrder[i])
3482        continue;
3483      SDOperand Elt = MaskElts[i];
3484      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3485      if (EltIdx == i)
3486        continue;
3487      SDOperand ExtOp = (EltIdx < 8)
3488        ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
3489                      DAG.getConstant(EltIdx, PtrVT))
3490        : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2,
3491                      DAG.getConstant(EltIdx - 8, PtrVT));
3492      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
3493                         DAG.getConstant(i, PtrVT));
3494    }
3495    return NewV;
3496  }
3497
3498  // PSHUF{H|L}W are not used. Lower into extracts and inserts but try to use
3499  // as few as possible.
3500  // First, let's find out how many elements are already in the right order.
3501  unsigned V1InOrder = 0;
3502  unsigned V1FromV1 = 0;
3503  unsigned V2InOrder = 0;
3504  unsigned V2FromV2 = 0;
3505  SmallVector<SDOperand, 8> V1Elts;
3506  SmallVector<SDOperand, 8> V2Elts;
3507  for (unsigned i = 0; i < 8; ++i) {
3508    SDOperand Elt = MaskElts[i];
3509    if (Elt.getOpcode() == ISD::UNDEF) {
3510      V1Elts.push_back(Elt);
3511      V2Elts.push_back(Elt);
3512      ++V1InOrder;
3513      ++V2InOrder;
3514      continue;
3515    }
3516    unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3517    if (EltIdx == i) {
3518      V1Elts.push_back(Elt);
3519      V2Elts.push_back(DAG.getConstant(i+8, MaskEVT));
3520      ++V1InOrder;
3521    } else if (EltIdx == i+8) {
3522      V1Elts.push_back(Elt);
3523      V2Elts.push_back(DAG.getConstant(i, MaskEVT));
3524      ++V2InOrder;
3525    } else if (EltIdx < 8) {
3526      V1Elts.push_back(Elt);
3527      ++V1FromV1;
3528    } else {
3529      V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT));
3530      ++V2FromV2;
3531    }
3532  }
3533
3534  if (V2InOrder > V1InOrder) {
3535    PermMask = CommuteVectorShuffleMask(PermMask, DAG);
3536    std::swap(V1, V2);
3537    std::swap(V1Elts, V2Elts);
3538    std::swap(V1FromV1, V2FromV2);
3539  }
3540
3541  if ((V1FromV1 + V1InOrder) != 8) {
3542    // Some elements are from V2.
3543    if (V1FromV1) {
3544      // If there are elements that are from V1 but out of place,
3545      // then first sort them in place
3546      SmallVector<SDOperand, 8> MaskVec;
3547      for (unsigned i = 0; i < 8; ++i) {
3548        SDOperand Elt = V1Elts[i];
3549        if (Elt.getOpcode() == ISD::UNDEF) {
3550          MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
3551          continue;
3552        }
3553        unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3554        if (EltIdx >= 8)
3555          MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
3556        else
3557          MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT));
3558      }
3559      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
3560      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask);
3561    }
3562
3563    NewV = V1;
3564    for (unsigned i = 0; i < 8; ++i) {
3565      SDOperand Elt = V1Elts[i];
3566      if (Elt.getOpcode() == ISD::UNDEF)
3567        continue;
3568      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3569      if (EltIdx < 8)
3570        continue;
3571      SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2,
3572                                    DAG.getConstant(EltIdx - 8, PtrVT));
3573      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
3574                         DAG.getConstant(i, PtrVT));
3575    }
3576    return NewV;
3577  } else {
3578    // All elements are from V1.
3579    NewV = V1;
3580    for (unsigned i = 0; i < 8; ++i) {
3581      SDOperand Elt = V1Elts[i];
3582      if (Elt.getOpcode() == ISD::UNDEF)
3583        continue;
3584      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3585      SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
3586                                    DAG.getConstant(EltIdx, PtrVT));
3587      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
3588                         DAG.getConstant(i, PtrVT));
3589    }
3590    return NewV;
3591  }
3592}
3593
3594/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
3595/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
3596/// done when every pair / quad of shuffle mask elements points to elements in
3597/// the right sequence. e.g.
3598/// vector_shuffle <>, <>, < 2, 3, | 10, 11, | 0, 1, | 14, 15>
3599static
3600SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2,
3601                                MVT::ValueType VT,
3602                                SDOperand PermMask, SelectionDAG &DAG,
3603                                TargetLowering &TLI) {
3604  unsigned NumElems = PermMask.getNumOperands();
3605  unsigned NewWidth = (NumElems == 4) ? 2 : 4;
3606  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth);
3607  MVT::ValueType NewVT = MaskVT;
3608  switch (VT) {
3609  case MVT::v4f32: NewVT = MVT::v2f64; break;
3610  case MVT::v4i32: NewVT = MVT::v2i64; break;
3611  case MVT::v8i16: NewVT = MVT::v4i32; break;
3612  case MVT::v16i8: NewVT = MVT::v4i32; break;
3613  default: assert(false && "Unexpected!");
3614  }
3615
3616  if (NewWidth == 2)
3617    if (MVT::isInteger(VT))
3618      NewVT = MVT::v2i64;
3619    else
3620      NewVT = MVT::v2f64;
3621  unsigned Scale = NumElems / NewWidth;
3622  SmallVector<SDOperand, 8> MaskVec;
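  // Each group of Scale consecutive mask elements must reference Scale
  // consecutive source elements starting at a multiple of Scale; the group
  // then collapses to the single wider index StartIdx / Scale.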
3623  for (unsigned i = 0; i < NumElems; i += Scale) {
3624    unsigned StartIdx = ~0U;
3625    for (unsigned j = 0; j < Scale; ++j) {
3626      SDOperand Elt = PermMask.getOperand(i+j);
3627      if (Elt.getOpcode() == ISD::UNDEF)
3628        continue;
3629      unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
3630      if (StartIdx == ~0U)
3631        StartIdx = EltIdx - (EltIdx % Scale);
3632      if (EltIdx != StartIdx + j)
3633        return SDOperand();
3634    }
3635    if (StartIdx == ~0U)
3636      MaskVec.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
3637    else
3638      MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MVT::i32));
3639  }
3640
3641  V1 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V1);
3642  V2 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V2);
3643  return DAG.getNode(ISD::VECTOR_SHUFFLE, NewVT, V1, V2,
3644                     DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3645                                 &MaskVec[0], MaskVec.size()));
3646}
3647
3648SDOperand
3649X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
3650  SDOperand V1 = Op.getOperand(0);
3651  SDOperand V2 = Op.getOperand(1);
3652  SDOperand PermMask = Op.getOperand(2);
3653  MVT::ValueType VT = Op.getValueType();
3654  unsigned NumElems = PermMask.getNumOperands();
3655  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
3656  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
3657  bool V1IsSplat = false;
3658  bool V2IsSplat = false;
3659
3660  if (isUndefShuffle(Op.Val))
3661    return DAG.getNode(ISD::UNDEF, VT);
3662
3663  if (isZeroShuffle(Op.Val))
3664    return getZeroVector(VT, DAG);
3665
3666  if (isIdentityMask(PermMask.Val))
3667    return V1;
3668  else if (isIdentityMask(PermMask.Val, true))
3669    return V2;
3670
3671  if (isSplatMask(PermMask.Val)) {
3672    if (NumElems <= 4) return Op;
3673    // Promote it to a v4i32 splat.
3674    return PromoteSplat(Op, DAG);
3675  }
3676
3677  // If the shuffle can be profitably rewritten as a narrower shuffle, then
3678  // do it!
3679  if (VT == MVT::v8i16 || VT == MVT::v16i8) {
3680    SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
3681    if (NewOp.Val)
3682      return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
3683  } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
3684    // FIXME: Figure out a cleaner way to do this.
3685    // Try to make use of movq to zero out the top part.
3686    if (ISD::isBuildVectorAllZeros(V2.Val)) {
3687      SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
3688      if (NewOp.Val) {
3689        SDOperand NewV1 = NewOp.getOperand(0);
3690        SDOperand NewV2 = NewOp.getOperand(1);
3691        SDOperand NewMask = NewOp.getOperand(2);
3692        if (isCommutedMOVL(NewMask.Val, true, false)) {
3693          NewOp = CommuteVectorShuffle(NewOp, NewV1, NewV2, NewMask, DAG);
3694          NewOp = DAG.getNode(ISD::VECTOR_SHUFFLE, NewOp.getValueType(),
3695                              NewV1, NewV2, getMOVLMask(2, DAG));
3696          return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
3697        }
3698      }
3699    } else if (ISD::isBuildVectorAllZeros(V1.Val)) {
3700      SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
3701      if (NewOp.Val && X86::isMOVLMask(NewOp.getOperand(2).Val))
3702        return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
3703    }
3704  }
3705
3706  if (X86::isMOVLMask(PermMask.Val))
3707    return (V1IsUndef) ? V2 : Op;
3708
3709  if (X86::isMOVSHDUPMask(PermMask.Val) ||
3710      X86::isMOVSLDUPMask(PermMask.Val) ||
3711      X86::isMOVHLPSMask(PermMask.Val) ||
3712      X86::isMOVHPMask(PermMask.Val) ||
3713      X86::isMOVLPMask(PermMask.Val))
3714    return Op;
3715
3716  if (ShouldXformToMOVHLPS(PermMask.Val) ||
3717      ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val))
3718    return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3719
3720  bool Commuted = false;
3721  // FIXME: This should also accept a bitcast of a splat?  Be careful, not
3722  // 1,1,1,1 -> v8i16 though.
3723  V1IsSplat = isSplatVector(V1.Val);
3724  V2IsSplat = isSplatVector(V2.Val);
3725
3726  // Canonicalize the splat or undef, if present, to be on the RHS.
3727  if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
3728    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3729    std::swap(V1IsSplat, V2IsSplat);
3730    std::swap(V1IsUndef, V2IsUndef);
3731    Commuted = true;
3732  }
3733
3734  // FIXME: Figure out a cleaner way to do this.
3735  if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) {
3736    if (V2IsUndef) return V1;
3737    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3738    if (V2IsSplat) {
3739      // V2 is a splat, so the mask may be malformed. That is, it may point
3740      // to any V2 element. The instruction selector won't like this. Get
3741      // a corrected mask and commute to form a proper MOVS{S|D}.
3742      SDOperand NewMask = getMOVLMask(NumElems, DAG);
3743      if (NewMask.Val != PermMask.Val)
3744        Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3745    }
3746    return Op;
3747  }
3748
3749  if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
3750      X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
3751      X86::isUNPCKLMask(PermMask.Val) ||
3752      X86::isUNPCKHMask(PermMask.Val))
3753    return Op;
3754
3755  if (V2IsSplat) {
3756    // Normalize the mask so all entries that point to V2 point to its first
3757    // element, then try to match unpck{h|l} again. If it matches, return a
3758    // new vector_shuffle with the corrected mask.
3759    SDOperand NewMask = NormalizeMask(PermMask, DAG);
3760    if (NewMask.Val != PermMask.Val) {
3761      if (X86::isUNPCKLMask(PermMask.Val, true)) {
3762        SDOperand NewMask = getUnpacklMask(NumElems, DAG);
3763        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3764      } else if (X86::isUNPCKHMask(PermMask.Val, true)) {
3765        SDOperand NewMask = getUnpackhMask(NumElems, DAG);
3766        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3767      }
3768    }
3769  }
3770
3771  // Normalize the node to match x86 shuffle ops if needed
3772  if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val))
3773      Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3774
3775  if (Commuted) {
3776    // Commute it back and try unpck* again.
3777    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3778    if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
3779        X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
3780        X86::isUNPCKLMask(PermMask.Val) ||
3781        X86::isUNPCKHMask(PermMask.Val))
3782      return Op;
3783  }
3784
3785  // If VT is integer, try PSHUF* first, then SHUFP*.
3786  if (MVT::isInteger(VT)) {
3787    // MMX doesn't have PSHUFD; it does have PSHUFW. While it's theoretically
3788    // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented.
3789    if (((MVT::getSizeInBits(VT) != 64 || NumElems == 4) &&
3790         X86::isPSHUFDMask(PermMask.Val)) ||
3791        X86::isPSHUFHWMask(PermMask.Val) ||
3792        X86::isPSHUFLWMask(PermMask.Val)) {
3793      if (V2.getOpcode() != ISD::UNDEF)
3794        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
3795                           DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
3796      return Op;
3797    }
3798
3799    if (X86::isSHUFPMask(PermMask.Val) &&
3800        MVT::getSizeInBits(VT) != 64)    // Don't do this for MMX.
3801      return Op;
3802  } else {
3803    // Floating point cases in the other order.
3804    if (X86::isSHUFPMask(PermMask.Val))
3805      return Op;
3806    if (X86::isPSHUFDMask(PermMask.Val) ||
3807        X86::isPSHUFHWMask(PermMask.Val) ||
3808        X86::isPSHUFLWMask(PermMask.Val)) {
3809      if (V2.getOpcode() != ISD::UNDEF)
3810        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
3811                           DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
3812      return Op;
3813    }
3814  }
3815
3816  // Handle v8i16 specifically since SSE can do byte extraction and insertion.
3817  if (VT == MVT::v8i16) {
3818    SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this);
3819    if (NewOp.Val)
3820      return NewOp;
3821  }
3822
3823  // Handle all 4 wide cases with a number of shuffles.
3824  if (NumElems == 4 && MVT::getSizeInBits(VT) != 64) {
3825    // Don't do this for MMX.
3826    MVT::ValueType MaskVT = PermMask.getValueType();
3827    MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
3828    SmallVector<std::pair<int, int>, 8> Locs;
3829    Locs.resize(NumElems);
3830    SmallVector<SDOperand, 8> Mask1(NumElems,
3831                                    DAG.getNode(ISD::UNDEF, MaskEVT));
3832    SmallVector<SDOperand, 8> Mask2(NumElems,
3833                                    DAG.getNode(ISD::UNDEF, MaskEVT));
3834    unsigned NumHi = 0;
3835    unsigned NumLo = 0;
3836    // If no more than two elements come from either vector, this can be
3837    // implemented with two shuffles. The first shuffle gathers the elements.
3838    // The second shuffle, which takes the first shuffle as both of its
3839    // vector operands, puts the elements into the right order.
3840    for (unsigned i = 0; i != NumElems; ++i) {
3841      SDOperand Elt = PermMask.getOperand(i);
3842      if (Elt.getOpcode() == ISD::UNDEF) {
3843        Locs[i] = std::make_pair(-1, -1);
3844      } else {
3845        unsigned Val = cast<ConstantSDNode>(Elt)->getValue();
3846        if (Val < NumElems) {
3847          Locs[i] = std::make_pair(0, NumLo);
3848          Mask1[NumLo] = Elt;
3849          NumLo++;
3850        } else {
3851          Locs[i] = std::make_pair(1, NumHi);
3852          if (2+NumHi < NumElems)
3853            Mask1[2+NumHi] = Elt;
3854          NumHi++;
3855        }
3856      }
3857    }
3858    if (NumLo <= 2 && NumHi <= 2) {
3859      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
3860                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3861                                   &Mask1[0], Mask1.size()));
3862      for (unsigned i = 0; i != NumElems; ++i) {
3863        if (Locs[i].first == -1)
3864          continue;
3865        else {
3866          unsigned Idx = (i < NumElems/2) ? 0 : NumElems;
3867          Idx += Locs[i].first * (NumElems/2) + Locs[i].second;
3868          Mask2[i] = DAG.getConstant(Idx, MaskEVT);
3869        }
3870      }
3871
3872      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1,
3873                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3874                                     &Mask2[0], Mask2.size()));
3875    }
3876
3877    // Break it into (shuffle shuffle_hi, shuffle_lo).
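    // Result elements in the low half are routed through LoMask and those in
    // the high half through HiMask. Within each mask, elements taken from V1
    // fill the low slots and elements from V2 fill the high slots; the final
    // shuffle of the two partial results puts everything in order.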
3878    Locs.clear();
    Locs.resize(NumElems);
3879    SmallVector<SDOperand,8> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
3880    SmallVector<SDOperand,8> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
3881    SmallVector<SDOperand,8> *MaskPtr = &LoMask;
3882    unsigned MaskIdx = 0;
3883    unsigned LoIdx = 0;
3884    unsigned HiIdx = NumElems/2;
3885    for (unsigned i = 0; i != NumElems; ++i) {
3886      if (i == NumElems/2) {
3887        MaskPtr = &HiMask;
3888        MaskIdx = 1;
3889        LoIdx = 0;
3890        HiIdx = NumElems/2;
3891      }
3892      SDOperand Elt = PermMask.getOperand(i);
3893      if (Elt.getOpcode() == ISD::UNDEF) {
3894        Locs[i] = std::make_pair(-1, -1);
3895      } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) {
3896        Locs[i] = std::make_pair(MaskIdx, LoIdx);
3897        (*MaskPtr)[LoIdx] = Elt;
3898        LoIdx++;
3899      } else {
3900        Locs[i] = std::make_pair(MaskIdx, HiIdx);
3901        (*MaskPtr)[HiIdx] = Elt;
3902        HiIdx++;
3903      }
3904    }
3905
3906    SDOperand LoShuffle =
3907      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
3908                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3909                              &LoMask[0], LoMask.size()));
3910    SDOperand HiShuffle =
3911      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
3912                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3913                              &HiMask[0], HiMask.size()));
3914    SmallVector<SDOperand, 8> MaskOps;
3915    for (unsigned i = 0; i != NumElems; ++i) {
3916      if (Locs[i].first == -1) {
3917        MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
3918      } else {
3919        unsigned Idx = Locs[i].first * NumElems + Locs[i].second;
3920        MaskOps.push_back(DAG.getConstant(Idx, MaskEVT));
3921      }
3922    }
3923    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle,
3924                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3925                                   &MaskOps[0], MaskOps.size()));
3926  }
3927
3928  return SDOperand();
3929}
3930
3931SDOperand
3932X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
3933  if (!isa<ConstantSDNode>(Op.getOperand(1)))
3934    return SDOperand();
3935
3936  MVT::ValueType VT = Op.getValueType();
3937  // TODO: handle v16i8.
3938  if (MVT::getSizeInBits(VT) == 16) {
3939    SDOperand Vec = Op.getOperand(0);
3940    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
3941    if (Idx == 0)
3942      return DAG.getNode(ISD::TRUNCATE, MVT::i16,
3943                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32,
3944                                 DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec),
3945                                     Op.getOperand(1)));
3946    // Transform it so it matches pextrw, which produces a 32-bit result.
3947    MVT::ValueType EVT = (MVT::ValueType)(VT+1);
3948    SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
3949                                    Op.getOperand(0), Op.getOperand(1));
3950    SDOperand Assert  = DAG.getNode(ISD::AssertZext, EVT, Extract,
3951                                    DAG.getValueType(VT));
3952    return DAG.getNode(ISD::TRUNCATE, VT, Assert);
3953  } else if (MVT::getSizeInBits(VT) == 32) {
3954    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
3955    if (Idx == 0)
3956      return Op;
3957    // SHUFPS the element to the lowest double word, then movss.
3958    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
3959    SmallVector<SDOperand, 8> IdxVec;
3960    IdxVec.
3961      push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT)));
3962    IdxVec.
3963      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
3964    IdxVec.
3965      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
3966    IdxVec.
3967      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
3968    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3969                                 &IdxVec[0], IdxVec.size());
3970    SDOperand Vec = Op.getOperand(0);
3971    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
3972                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
3973    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
3974                       DAG.getConstant(0, getPointerTy()));
3975  } else if (MVT::getSizeInBits(VT) == 64) {
3976    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
3977    if (Idx == 0)
3978      return Op;
3979
3980    // UNPCKHPD the element to the lowest double word, then movsd.
3981    // Note that if the lower 64 bits of the UNPCKHPD result are then stored
3982    // to f64mem, the whole operation is folded into a single MOVHPDmr.
3983    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
3984    SmallVector<SDOperand, 8> IdxVec;
3985    IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT)));
3986    IdxVec.
3987      push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
3988    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3989                                 &IdxVec[0], IdxVec.size());
3990    SDOperand Vec = Op.getOperand(0);
3991    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
3992                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
3993    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
3994                       DAG.getConstant(0, getPointerTy()));
3995  }
3996
3997  return SDOperand();
3998}
3999
4000SDOperand
4001X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
4002  MVT::ValueType VT = Op.getValueType();
4003  MVT::ValueType EVT = MVT::getVectorElementType(VT);
4004  if (EVT == MVT::i8)
4005    return SDOperand();
4006
4007  SDOperand N0 = Op.getOperand(0);
4008  SDOperand N1 = Op.getOperand(1);
4009  SDOperand N2 = Op.getOperand(2);
4010
4011  if (MVT::getSizeInBits(EVT) == 16) {
4012    // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
4013    // as its second argument.
4014    if (N1.getValueType() != MVT::i32)
4015      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
4016    if (N2.getValueType() != MVT::i32)
4017      N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(),getPointerTy());
4018    return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
4019  }
4020
4021  N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1);
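  // Otherwise move the scalar into a vector with SCALAR_TO_VECTOR and shuffle
  // it in: the mask keeps lane i of N0 everywhere except lane Idx, which is
  // taken from the second (new) vector.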
4022  unsigned Idx = cast<ConstantSDNode>(N2)->getValue();
4023  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
4024  MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
4025  SmallVector<SDOperand, 4> MaskVec;
4026  for (unsigned i = 0; i < 4; ++i)
4027    MaskVec.push_back(DAG.getConstant((i == Idx) ? i+4 : i, MaskEVT));
4028  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, N0, N1,
4029                     DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
4030                                 &MaskVec[0], MaskVec.size()));
4031}
4032
4033SDOperand
4034X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
4035  SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
4036  return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt);
4037}
4038
4039// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
4040// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
4041// one of the above-mentioned nodes. It has to be wrapped because otherwise
4042// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
4043// be used to form an addressing mode. These wrapped nodes will be selected
4044// into MOV32ri.
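//
// For example, a constant pool reference ends up roughly as
//   (X86ISD::Wrapper (TargetConstantPool <cp#>))
// and, under PIC on ia32, is additionally added to X86ISD::GlobalBaseReg, as
// the lowering routines below show.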
4045SDOperand
4046X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
4047  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
4048  SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(),
4049                                               getPointerTy(),
4050                                               CP->getAlignment());
4051  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
4052  // With PIC, the address is actually $g + Offset.
4053  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
4054      !Subtarget->isPICStyleRIPRel()) {
4055    Result = DAG.getNode(ISD::ADD, getPointerTy(),
4056                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
4057                         Result);
4058  }
4059
4060  return Result;
4061}
4062
4063SDOperand
4064X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
4065  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
4066  SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy());
4067  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
4068  // With PIC, the address is actually $g + Offset.
4069  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
4070      !Subtarget->isPICStyleRIPRel()) {
4071    Result = DAG.getNode(ISD::ADD, getPointerTy(),
4072                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
4073                         Result);
4074  }
4075
4076  // For Darwin & Mingw32, external and weak symbols are indirect, so we want
4077  // to load the value at address GV, not the value of GV itself. This means
4078  // the GlobalAddress must be in the base or index register of the address,
4079  // not the GV offset field. The platform check is inside GVRequiresExtraLoad().
4080  // The same applies to external symbols during PIC codegen.
4081  if (Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false))
4082    Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, NULL, 0);
4083
4084  return Result;
4085}
4086
4087// Lower ISD::GlobalTLSAddress using the "general dynamic" model
4088static SDOperand
4089LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
4090                              const MVT::ValueType PtrVT) {
4091  SDOperand InFlag;
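  // The "general dynamic" TLS sequence requires the GOT pointer to be live in
  // EBX across the leal and the call, so copy the PIC base register into it
  // first.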
4092  SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX,
4093                                     DAG.getNode(X86ISD::GlobalBaseReg,
4094                                                 PtrVT), InFlag);
4095  InFlag = Chain.getValue(1);
4096
4097  // emit leal symbol@TLSGD(,%ebx,1), %eax
4098  SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag);
4099  SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
4100                                             GA->getValueType(0),
4101                                             GA->getOffset());
4102  SDOperand Ops[] = { Chain,  TGA, InFlag };
4103  SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3);
4104  InFlag = Result.getValue(2);
4105  Chain = Result.getValue(1);
4106
4107  // call ___tls_get_addr. This function receives its argument in
4108  // the register EAX.
4109  Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag);
4110  InFlag = Chain.getValue(1);
4111
4112  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
4113  SDOperand Ops1[] = { Chain,
4114                      DAG.getTargetExternalSymbol("___tls_get_addr",
4115                                                  PtrVT),
4116                      DAG.getRegister(X86::EAX, PtrVT),
4117                      DAG.getRegister(X86::EBX, PtrVT),
4118                      InFlag };
4119  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5);
4120  InFlag = Chain.getValue(1);
4121
4122  return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag);
4123}
4124
4125// Lower ISD::GlobalTLSAddress using the "initial exec" (for non-PIC) or
4126// "local exec" model.
4127static SDOperand
4128LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
4129                         const MVT::ValueType PtrVT) {
4130  // Get the Thread Pointer
4131  SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT);
4132  // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
4133  // exec)
4134  SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
4135                                             GA->getValueType(0),
4136                                             GA->getOffset());
4137  SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA);
4138
4139  if (GA->getGlobal()->isDeclaration()) // initial exec TLS model
4140    Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, NULL, 0);
4141
4142  // The address of the thread-local variable is the thread pointer plus the
4143  // offset of the variable.
4144  return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset);
4145}
4146
4147SDOperand
4148X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) {
4149  // TODO: implement the "local dynamic" model
4150  // TODO: implement the "initial exec" model for PIC executables
4151  assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() &&
4152         "TLS not implemented for non-ELF and 64-bit targets");
4153  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
4154  // If the relocation model is PIC, use the "general dynamic" TLS model;
4155  // otherwise use the "local exec" TLS model.
4156  if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
4157    return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy());
4158  else
4159    return LowerToTLSExecModel(GA, DAG, getPointerTy());
4160}
4161
4162SDOperand
4163X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) {
4164  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
4165  SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy());
4166  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
4167  // With PIC, the address is actually $g + Offset.
4168  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
4169      !Subtarget->isPICStyleRIPRel()) {
4170    Result = DAG.getNode(ISD::ADD, getPointerTy(),
4171                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
4172                         Result);
4173  }
4174
4175  return Result;
4176}
4177
4178SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
4179  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
4180  SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy());
4181  Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
4182  // With PIC, the address is actually $g + Offset.
4183  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
4184      !Subtarget->isPICStyleRIPRel()) {
4185    Result = DAG.getNode(ISD::ADD, getPointerTy(),
4186                         DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
4187                         Result);
4188  }
4189
4190  return Result;
4191}
4192
4193/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and
4194/// take a 2 x i32 value to shift plus a shift amount.
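/// The halves for "shift amount < 32" are computed with SHLD/SHRD plus a
/// plain shift, and CMOVs keyed off bit 5 of the shift amount (the AND with
/// 32 below) patch up the result when the shift amount is 32 or more, since
/// the 32-bit hardware shifts only use the low five bits of the count.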
4195SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) {
4196  assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
4197         "Not an i64 shift!");
4198  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
4199  SDOperand ShOpLo = Op.getOperand(0);
4200  SDOperand ShOpHi = Op.getOperand(1);
4201  SDOperand ShAmt  = Op.getOperand(2);
4202  SDOperand Tmp1 = isSRA ?
4203    DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) :
4204    DAG.getConstant(0, MVT::i32);
4205
4206  SDOperand Tmp2, Tmp3;
4207  if (Op.getOpcode() == ISD::SHL_PARTS) {
4208    Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt);
4209    Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt);
4210  } else {
4211    Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt);
4212    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt);
4213  }
4214
4215  const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
4216  SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt,
4217                                  DAG.getConstant(32, MVT::i8));
4218  SDOperand Cond = DAG.getNode(X86ISD::CMP, MVT::i32,
4219                               AndNode, DAG.getConstant(0, MVT::i8));
4220
4221  SDOperand Hi, Lo;
4222  SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8);
4223  VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag);
4224  SmallVector<SDOperand, 4> Ops;
4225  if (Op.getOpcode() == ISD::SHL_PARTS) {
4226    Ops.push_back(Tmp2);
4227    Ops.push_back(Tmp3);
4228    Ops.push_back(CC);
4229    Ops.push_back(Cond);
4230    Hi = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size());
4231
4232    Ops.clear();
4233    Ops.push_back(Tmp3);
4234    Ops.push_back(Tmp1);
4235    Ops.push_back(CC);
4236    Ops.push_back(Cond);
4237    Lo = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size());
4238  } else {
4239    Ops.push_back(Tmp2);
4240    Ops.push_back(Tmp3);
4241    Ops.push_back(CC);
4242    Ops.push_back(Cond);
4243    Lo = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size());
4244
4245    Ops.clear();
4246    Ops.push_back(Tmp3);
4247    Ops.push_back(Tmp1);
4248    Ops.push_back(CC);
4249    Ops.push_back(Cond);
4250    Hi = DAG.getNode(X86ISD::CMOV, MVT::i32, &Ops[0], Ops.size());
4251  }
4252
4253  VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32);
4254  Ops.clear();
4255  Ops.push_back(Lo);
4256  Ops.push_back(Hi);
4257  return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size());
4258}
4259
4260SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
4261  assert(Op.getOperand(0).getValueType() <= MVT::i64 &&
4262         Op.getOperand(0).getValueType() >= MVT::i16 &&
4263         "Unknown SINT_TO_FP to lower!");
4264
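  // The integer operand is spilled to a stack slot and loaded onto the x87
  // stack with FILD; if the result is to live in an SSE register, it is then
  // stored back with FST and reloaded (see the FIXME below).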
4265  SDOperand Result;
4266  MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
4267  unsigned Size = MVT::getSizeInBits(SrcVT)/8;
4268  MachineFunction &MF = DAG.getMachineFunction();
4269  int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
4270  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
4271  SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0),
4272                                 StackSlot, NULL, 0);
4273
4274  // These are really Legal; caller falls through into that case.
4275  if (SrcVT==MVT::i32 && Op.getValueType() == MVT::f32 && X86ScalarSSEf32)
4276    return Result;
4277  if (SrcVT==MVT::i32 && Op.getValueType() == MVT::f64 && X86ScalarSSEf64)
4278    return Result;
4279  if (SrcVT==MVT::i64 && Op.getValueType() != MVT::f80 &&
4280      Subtarget->is64Bit())
4281    return Result;
4282
4283  // Build the FILD
4284  SDVTList Tys;
4285  bool useSSE = (X86ScalarSSEf32 && Op.getValueType() == MVT::f32) ||
4286                (X86ScalarSSEf64 && Op.getValueType() == MVT::f64);
4287  if (useSSE)
4288    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
4289  else
4290    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
4291  SmallVector<SDOperand, 8> Ops;
4292  Ops.push_back(Chain);
4293  Ops.push_back(StackSlot);
4294  Ops.push_back(DAG.getValueType(SrcVT));
4295  Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG :X86ISD::FILD,
4296                       Tys, &Ops[0], Ops.size());
4297
4298  if (useSSE) {
4299    Chain = Result.getValue(1);
4300    SDOperand InFlag = Result.getValue(2);
4301
4302    // FIXME: Currently the FST is flagged to the FILD_FLAG. This
4303    // shouldn't be necessary except that RFP cannot be live across
4304    // multiple blocks. When stackifier is fixed, they can be uncoupled.
4305    MachineFunction &MF = DAG.getMachineFunction();
4306    int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
4307    SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
4308    Tys = DAG.getVTList(MVT::Other);
4309    SmallVector<SDOperand, 8> Ops;
4310    Ops.push_back(Chain);
4311    Ops.push_back(Result);
4312    Ops.push_back(StackSlot);
4313    Ops.push_back(DAG.getValueType(Op.getValueType()));
4314    Ops.push_back(InFlag);
4315    Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
4316    Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, NULL, 0);
4317  }
4318
4319  return Result;
4320}
4321
4322std::pair<SDOperand,SDOperand> X86TargetLowering::
4323FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) {
4324  assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
4325         "Unknown FP_TO_SINT to lower!");
4326
4327  // These are really Legal.
4328  if (Op.getValueType() == MVT::i32 &&
4329      X86ScalarSSEf32 && Op.getOperand(0).getValueType() == MVT::f32)
4330    return std::make_pair(SDOperand(), SDOperand());
4331  if (Op.getValueType() == MVT::i32 &&
4332      X86ScalarSSEf64 && Op.getOperand(0).getValueType() == MVT::f64)
4333    return std::make_pair(SDOperand(), SDOperand());
4334  if (Subtarget->is64Bit() &&
4335      Op.getValueType() == MVT::i64 &&
4336      Op.getOperand(0).getValueType() != MVT::f80)
4337    return std::make_pair(SDOperand(), SDOperand());
4338
4339  // We lower FP->sint into a FP_TO_INT*_IN_MEM node that stores to a
4340  // temporary stack slot; the caller then loads the result from that slot.
4341  MachineFunction &MF = DAG.getMachineFunction();
4342  unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
4343  int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
4344  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
4345  unsigned Opc;
4346  switch (Op.getValueType()) {
4347  default: assert(0 && "Invalid FP_TO_SINT to lower!");
4348  case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
4349  case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
4350  case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
4351  }
4352
4353  SDOperand Chain = DAG.getEntryNode();
4354  SDOperand Value = Op.getOperand(0);
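  // FIST cannot take its input from an SSE register, so if the operand lives
  // in one, store it to memory and reload it onto the x87 stack with FLD.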
4355  if ((X86ScalarSSEf32 && Op.getOperand(0).getValueType() == MVT::f32) ||
4356      (X86ScalarSSEf64 && Op.getOperand(0).getValueType() == MVT::f64)) {
4357    assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
4358    Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0);
4359    SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
4360    SDOperand Ops[] = {
4361      Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType())
4362    };
4363    Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3);
4364    Chain = Value.getValue(1);
4365    SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
4366    StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
4367  }
4368
4369  // Build the FP_TO_INT*_IN_MEM
4370  SDOperand Ops[] = { Chain, Value, StackSlot };
4371  SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3);
4372
4373  return std::make_pair(FIST, StackSlot);
4374}
4375
4376SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
4377  std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG);
4378  SDOperand FIST = Vals.first, StackSlot = Vals.second;
4379  if (FIST.Val == 0) return SDOperand();
4380
4381  // Load the result.
4382  return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0);
4383}
4384
4385SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) {
4386  std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG);
4387  SDOperand FIST = Vals.first, StackSlot = Vals.second;
4388  if (FIST.Val == 0) return 0;
4389
4390  // Return an i64 load from the stack slot.
4391  SDOperand Res = DAG.getLoad(MVT::i64, FIST, StackSlot, NULL, 0);
4392
4393  // Use a MERGE_VALUES node to drop the chain result value.
4394  return DAG.getNode(ISD::MERGE_VALUES, MVT::i64, Res).Val;
4395}
4396
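// FABS is lowered as an FAND with a constant-pool mask that clears the sign
// bit of each element (e.g. 0x7fffffffffffffff for f64, 0x7fffffff for f32).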
4397SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
4398  MVT::ValueType VT = Op.getValueType();
4399  MVT::ValueType EltVT = VT;
4400  if (MVT::isVector(VT))
4401    EltVT = MVT::getVectorElementType(VT);
4402  const Type *OpNTy =  MVT::getTypeForValueType(EltVT);
4403  std::vector<Constant*> CV;
4404  if (EltVT == MVT::f64) {
4405    Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63))));
4406    CV.push_back(C);
4407    CV.push_back(C);
4408  } else {
4409    Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31))));
4410    CV.push_back(C);
4411    CV.push_back(C);
4412    CV.push_back(C);
4413    CV.push_back(C);
4414  }
4415  Constant *C = ConstantVector::get(CV);
4416  SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
4417  SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0,
4418                               false, 16);
4419  return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask);
4420}
4421
4422SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
4423  MVT::ValueType VT = Op.getValueType();
4424  MVT::ValueType EltVT = VT;
4425  unsigned EltNum = 1;
4426  if (MVT::isVector(VT)) {
4427    EltVT = MVT::getVectorElementType(VT);
4428    EltNum = MVT::getVectorNumElements(VT);
4429  }
4430  const Type *OpNTy =  MVT::getTypeForValueType(EltVT);
4431  std::vector<Constant*> CV;
4432  if (EltVT == MVT::f64) {
4433    Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63)));
4434    CV.push_back(C);
4435    CV.push_back(C);
4436  } else {
4437    Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31)));
4438    CV.push_back(C);
4439    CV.push_back(C);
4440    CV.push_back(C);
4441    CV.push_back(C);
4442  }
4443  Constant *C = ConstantVector::get(CV);
4444  SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
4445  SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0,
4446                               false, 16);
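  // Flip the sign bit(s) by XORing with the mask: vectors go through a v2i64
  // integer XOR after a bit_convert, scalars use the X86-specific FXOR node.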
4447  if (MVT::isVector(VT)) {
4448    return DAG.getNode(ISD::BIT_CONVERT, VT,
4449                       DAG.getNode(ISD::XOR, MVT::v2i64,
4450                    DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)),
4451                    DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Mask)));
4452  } else {
4453    return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask);
4454  }
4455}
4456
4457SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) {
4458  SDOperand Op0 = Op.getOperand(0);
4459  SDOperand Op1 = Op.getOperand(1);
4460  MVT::ValueType VT = Op.getValueType();
4461  MVT::ValueType SrcVT = Op1.getValueType();
4462  const Type *SrcTy =  MVT::getTypeForValueType(SrcVT);
4463
4464  // If second operand is smaller, extend it first.
4465  if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) {
4466    Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1);
4467    SrcVT = VT;
4468    SrcTy = MVT::getTypeForValueType(SrcVT);
4469  }
4470  // And if it is bigger, shrink it first.
4471  if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) {
4472    Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1);
4473    SrcVT = VT;
4474    SrcTy = MVT::getTypeForValueType(SrcVT);
4475  }
4476
4477  // At this point the operands and the result should have the same
4478  // type, and that won't be f80 since that is not custom lowered.
4479
4480  // First get the sign bit of second operand.
4481  std::vector<Constant*> CV;
4482  if (SrcVT == MVT::f64) {
4483    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63))));
4484    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0))));
4485  } else {
4486    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31))));
4487    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
4488    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
4489    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
4490  }
4491  Constant *C = ConstantVector::get(CV);
4492  SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
4493  SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, NULL, 0,
4494                                false, 16);
4495  SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1);
4496
4497  // Shift sign bit right or left if the two operands have different types.
4498  if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) {
4499    // Op0 is MVT::f32, Op1 is MVT::f64.
4500    SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit);
4501    SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit,
4502                          DAG.getConstant(32, MVT::i32));
4503    SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit);
4504    SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit,
4505                          DAG.getConstant(0, getPointerTy()));
4506  }
4507
4508  // Clear first operand sign bit.
4509  CV.clear();
4510  if (VT == MVT::f64) {
4511    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63)))));
4512    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0))));
4513  } else {
4514    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31)))));
4515    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
4516    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
4517    CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
4518  }
4519  C = ConstantVector::get(CV);
4520  CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
4521  SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, NULL, 0,
4522                                false, 16);
4523  SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2);
4524
4525  // Or the value with the sign bit.
4526  return DAG.getNode(X86ISD::FOR, VT, Val, SignBit);
4527}
4528
4529SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
4530  assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
4531  SDOperand Cond;
4532  SDOperand Op0 = Op.getOperand(0);
4533  SDOperand Op1 = Op.getOperand(1);
4534  SDOperand CC = Op.getOperand(2);
4535  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
4536  bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
4537  unsigned X86CC;
4538
4539  if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC,
4540                     Op0, Op1, DAG)) {
4541    Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1);
4542    return DAG.getNode(X86ISD::SETCC, MVT::i8,
4543                       DAG.getConstant(X86CC, MVT::i8), Cond);
4544  }
4545
4546  assert(isFP && "Illegal integer SetCC!");
4547
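  // SETOEQ and SETUNE have no single condition code because the FP compare
  // sets PF on an unordered result; materialize them as two SETCCs on the
  // same compare, combined with AND or OR.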
4548  Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1);
4549  switch (SetCCOpcode) {
4550  default: assert(false && "Illegal floating point SetCC!");
4551  case ISD::SETOEQ: {  // !PF & ZF
4552    SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8,
4553                                 DAG.getConstant(X86::COND_NP, MVT::i8), Cond);
4554    SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
4555                                 DAG.getConstant(X86::COND_E, MVT::i8), Cond);
4556    return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
4557  }
4558  case ISD::SETUNE: {  // PF | !ZF
4559    SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8,
4560                                 DAG.getConstant(X86::COND_P, MVT::i8), Cond);
4561    SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
4562                                 DAG.getConstant(X86::COND_NE, MVT::i8), Cond);
4563    return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
4564  }
4565  }
4566}
4567
4568
4569SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) {
4570  bool addTest = true;
4571  SDOperand Cond  = Op.getOperand(0);
4572  SDOperand CC;
4573
4574  if (Cond.getOpcode() == ISD::SETCC)
4575    Cond = LowerSETCC(Cond, DAG);
4576
4577  // If condition flag is set by a X86ISD::CMP, then use it as the condition
4578  // setting operand in place of the X86ISD::SETCC.
4579  if (Cond.getOpcode() == X86ISD::SETCC) {
4580    CC = Cond.getOperand(0);
4581
4582    SDOperand Cmp = Cond.getOperand(1);
4583    unsigned Opc = Cmp.getOpcode();
4584    MVT::ValueType VT = Op.getValueType();
4585    bool IllegalFPCMov = false;
4586    if (VT == MVT::f32 && !X86ScalarSSEf32)
4587      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
4588    else if (VT == MVT::f64 && !X86ScalarSSEf64)
4589      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
4590    else if (VT == MVT::f80)
4591      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
4592    if ((Opc == X86ISD::CMP ||
4593         Opc == X86ISD::COMI ||
4594         Opc == X86ISD::UCOMI) && !IllegalFPCMov) {
4595      Cond = Cmp;
4596      addTest = false;
4597    }
4598  }
4599
4600  if (addTest) {
4601    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
4602    Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8));
4603  }
4604
4605  const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(),
4606                                                    MVT::Flag);
4607  SmallVector<SDOperand, 4> Ops;
4608  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
4609  // the condition is true.
4610  Ops.push_back(Op.getOperand(2));
4611  Ops.push_back(Op.getOperand(1));
4612  Ops.push_back(CC);
4613  Ops.push_back(Cond);
4614  return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
4615}
4616
4617SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
4618  bool addTest = true;
4619  SDOperand Chain = Op.getOperand(0);
4620  SDOperand Cond  = Op.getOperand(1);
4621  SDOperand Dest  = Op.getOperand(2);
4622  SDOperand CC;
4623
4624  if (Cond.getOpcode() == ISD::SETCC)
4625    Cond = LowerSETCC(Cond, DAG);
4626
4627  // If condition flag is set by a X86ISD::CMP, then use it as the condition
4628  // setting operand in place of the X86ISD::SETCC.
4629  if (Cond.getOpcode() == X86ISD::SETCC) {
4630    CC = Cond.getOperand(0);
4631
4632    SDOperand Cmp = Cond.getOperand(1);
4633    unsigned Opc = Cmp.getOpcode();
4634    if (Opc == X86ISD::CMP ||
4635        Opc == X86ISD::COMI ||
4636        Opc == X86ISD::UCOMI) {
4637      Cond = Cmp;
4638      addTest = false;
4639    }
4640  }
4641
4642  if (addTest) {
4643    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
4644    Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8));
4645  }
4646  return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
4647                     Chain, Dest, CC, Cond);
4648}
4649
4650SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
4651  unsigned CallingConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
4652  bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
4653
4654  if (Subtarget->is64Bit()) {
4655    if (CallingConv == CallingConv::Fast && isTailCall && PerformTailCallOpt)
4656      return LowerX86_TailCallTo(Op, DAG, CallingConv);
4657    else
4658      return LowerX86_64CCCCallTo(Op, DAG, CallingConv);
4659  } else
4660    switch (CallingConv) {
4661    default:
4662      assert(0 && "Unsupported calling convention");
4663    case CallingConv::Fast:
4664      if (isTailCall && PerformTailCallOpt)
4665        return LowerX86_TailCallTo(Op, DAG, CallingConv);
4666      else
4667        return LowerCCCCallTo(Op, DAG, CallingConv);
4668    case CallingConv::C:
4669    case CallingConv::X86_StdCall:
4670      return LowerCCCCallTo(Op, DAG, CallingConv);
4671    case CallingConv::X86_FastCall:
4672      return LowerFastCCCallTo(Op, DAG, CallingConv);
4673    }
4674}
4675
4676
4677// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
4678// A call to _alloca is needed to probe the stack when allocating more than 4K
4679// bytes in one go. Touching the stack at 4K increments is necessary to ensure
4680// that the guard pages used by the OS virtual memory manager are allocated in
4681// the correct sequence.
4682SDOperand
4683X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
4684                                           SelectionDAG &DAG) {
4685  assert(Subtarget->isTargetCygMing() &&
4686         "This should be used only on Cygwin/Mingw targets");
4687
4688  // Get the inputs.
4689  SDOperand Chain = Op.getOperand(0);
4690  SDOperand Size  = Op.getOperand(1);
4691  // FIXME: Ensure alignment here
4692
4693  SDOperand Flag;
4694
4695  MVT::ValueType IntPtr = getPointerTy();
4696  MVT::ValueType SPTy = (Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
4697
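  // Pass the allocation size to _alloca in EAX; after the call, read the
  // adjusted stack pointer back and return it along with the chain.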
4698  Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag);
4699  Flag = Chain.getValue(1);
4700
4701  SDVTList  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
4702  SDOperand Ops[] = { Chain,
4703                      DAG.getTargetExternalSymbol("_alloca", IntPtr),
4704                      DAG.getRegister(X86::EAX, IntPtr),
4705                      Flag };
4706  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4);
4707  Flag = Chain.getValue(1);
4708
4709  Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1);
4710
4711  std::vector<MVT::ValueType> Tys;
4712  Tys.push_back(SPTy);
4713  Tys.push_back(MVT::Other);
4714  SDOperand Ops1[2] = { Chain.getValue(0), Chain };
4715  return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2);
4716}
4717
4718SDOperand
4719X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
4720  MachineFunction &MF = DAG.getMachineFunction();
4721  const Function* Fn = MF.getFunction();
4722  if (Fn->hasExternalLinkage() &&
4723      Subtarget->isTargetCygMing() &&
4724      Fn->getName() == "main")
4725    MF.getInfo<X86MachineFunctionInfo>()->setForceFramePointer(true);
4726
4727  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
4728  if (Subtarget->is64Bit())
4729    return LowerX86_64CCCArguments(Op, DAG);
4730  else
4731    switch(CC) {
4732    default:
4733      assert(0 && "Unsupported calling convention");
4734    case CallingConv::Fast:
4735      return LowerCCCArguments(Op, DAG, true);
4737    case CallingConv::C:
4738      return LowerCCCArguments(Op, DAG);
4739    case CallingConv::X86_StdCall:
4740      MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(StdCall);
4741      return LowerCCCArguments(Op, DAG, true);
4742    case CallingConv::X86_FastCall:
4743      MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(FastCall);
4744      return LowerFastCCArguments(Op, DAG);
4745    }
4746}
4747
4748SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) {
4749  SDOperand InFlag(0, 0);
4750  SDOperand Chain = Op.getOperand(0);
4751  unsigned Align =
4752    (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
4753  if (Align == 0) Align = 1;
4754
4755  ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4756  // If not DWORD aligned or size is more than the threshold, call memset.
4757  // The libc version is likely to be faster for these cases. It can use the
4758  // address value and run time information about the CPU.
4759  if ((Align & 3) != 0 ||
4760      (I && I->getValue() > Subtarget->getMaxInlineSizeThreshold())) {
4761    MVT::ValueType IntPtr = getPointerTy();
4762    const Type *IntPtrTy = getTargetData()->getIntPtrType();
4763    TargetLowering::ArgListTy Args;
4764    TargetLowering::ArgListEntry Entry;
4765    Entry.Node = Op.getOperand(1);
4766    Entry.Ty = IntPtrTy;
4767    Args.push_back(Entry);
4768    // Extend the unsigned i8 argument to be an int value for the call.
4769    Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
4770    Entry.Ty = IntPtrTy;
4771    Args.push_back(Entry);
4772    Entry.Node = Op.getOperand(3);
4773    Args.push_back(Entry);
4774    std::pair<SDOperand,SDOperand> CallResult =
4775      LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false,
4776                  DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
4777    return CallResult.second;
4778  }
4779
4780  MVT::ValueType AVT;
4781  SDOperand Count;
4782  ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4783  unsigned BytesLeft = 0;
4784  bool TwoRepStos = false;
4785  if (ValC) {
4786    unsigned ValReg;
4787    uint64_t Val = ValC->getValue() & 255;
4788
4789    // If the value is a constant, we can potentially use wider stores.
4790    switch (Align & 3) {
4791      case 2:   // WORD aligned
4792        AVT = MVT::i16;
4793        ValReg = X86::AX;
4794        Val = (Val << 8) | Val;
4795        break;
4796      case 0:  // DWORD aligned
4797        AVT = MVT::i32;
4798        ValReg = X86::EAX;
4799        Val = (Val << 8)  | Val;
4800        Val = (Val << 16) | Val;
4801        if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) {  // QWORD aligned
4802          AVT = MVT::i64;
4803          ValReg = X86::RAX;
4804          Val = (Val << 32) | Val;
4805        }
4806        break;
4807      default:  // Byte aligned
4808        AVT = MVT::i8;
4809        ValReg = X86::AL;
4810        Count = Op.getOperand(3);
4811        break;
4812    }
4813
4814    if (AVT > MVT::i8) {
4815      if (I) {
4816        unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
4817        Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy());
4818        BytesLeft = I->getValue() % UBytes;
4819      } else {
4820        assert(AVT >= MVT::i32 &&
4821               "Do not use rep;stos if not at least DWORD aligned");
4822        Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
4823                            Op.getOperand(3), DAG.getConstant(2, MVT::i8));
4824        TwoRepStos = true;
4825      }
4826    }
4827
4828    Chain  = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
4829                              InFlag);
4830    InFlag = Chain.getValue(1);
4831  } else {
4832    AVT = MVT::i8;
4833    Count  = Op.getOperand(3);
4834    Chain  = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
4835    InFlag = Chain.getValue(1);
4836  }
4837
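  // Set up the rep;stos: element count in [ER]CX, destination in [ER]DI, and
  // the fill value already in AL/AX/EAX/RAX. Tail bytes are handled below,
  // either by a second byte-wide rep;stos (variable count) or by explicit
  // stores (constant count).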
4838  Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
4839                            Count, InFlag);
4840  InFlag = Chain.getValue(1);
4841  Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
4842                            Op.getOperand(1), InFlag);
4843  InFlag = Chain.getValue(1);
4844
4845  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
4846  SmallVector<SDOperand, 8> Ops;
4847  Ops.push_back(Chain);
4848  Ops.push_back(DAG.getValueType(AVT));
4849  Ops.push_back(InFlag);
4850  Chain  = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
4851
4852  if (TwoRepStos) {
4853    InFlag = Chain.getValue(1);
4854    Count = Op.getOperand(3);
4855    MVT::ValueType CVT = Count.getValueType();
4856    SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
4857                               DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
4858    Chain  = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX,
4859                              Left, InFlag);
4860    InFlag = Chain.getValue(1);
4861    Tys = DAG.getVTList(MVT::Other, MVT::Flag);
4862    Ops.clear();
4863    Ops.push_back(Chain);
4864    Ops.push_back(DAG.getValueType(MVT::i8));
4865    Ops.push_back(InFlag);
4866    Chain  = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
4867  } else if (BytesLeft) {
4868    // Issue stores for the last 1 - 7 bytes.
4869    SDOperand Value;
4870    unsigned Val = ValC->getValue() & 255;
4871    unsigned Offset = I->getValue() - BytesLeft;
4872    SDOperand DstAddr = Op.getOperand(1);
4873    MVT::ValueType AddrVT = DstAddr.getValueType();
4874    if (BytesLeft >= 4) {
4875      Val = (Val << 8)  | Val;
4876      Val = (Val << 16) | Val;
4877      Value = DAG.getConstant(Val, MVT::i32);
4878      Chain = DAG.getStore(Chain, Value,
4879                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
4880                                       DAG.getConstant(Offset, AddrVT)),
4881                           NULL, 0);
4882      BytesLeft -= 4;
4883      Offset += 4;
4884    }
4885    if (BytesLeft >= 2) {
4886      Value = DAG.getConstant((Val << 8) | Val, MVT::i16);
4887      Chain = DAG.getStore(Chain, Value,
4888                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
4889                                       DAG.getConstant(Offset, AddrVT)),
4890                           NULL, 0);
4891      BytesLeft -= 2;
4892      Offset += 2;
4893    }
4894    if (BytesLeft == 1) {
4895      Value = DAG.getConstant(Val, MVT::i8);
4896      Chain = DAG.getStore(Chain, Value,
4897                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
4898                                       DAG.getConstant(Offset, AddrVT)),
4899                           NULL, 0);
4900    }
4901  }
4902
4903  return Chain;
4904}
4905
4906SDOperand X86TargetLowering::LowerMEMCPYInline(SDOperand Chain,
4907                                               SDOperand Dest,
4908                                               SDOperand Source,
4909                                               unsigned Size,
4910                                               unsigned Align,
4911                                               SelectionDAG &DAG) {
4912  MVT::ValueType AVT;
4913  unsigned BytesLeft = 0;
4914  switch (Align & 3) {
4915    case 2:   // WORD aligned
4916      AVT = MVT::i16;
4917      break;
4918    case 0:  // DWORD aligned
4919      AVT = MVT::i32;
4920      if (Subtarget->is64Bit() && ((Align & 0xF) == 0))  // QWORD aligned
4921        AVT = MVT::i64;
4922      break;
4923    default:  // Byte aligned
4924      AVT = MVT::i8;
4925      break;
4926  }
4927
4928  unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
4929  SDOperand Count = DAG.getConstant(Size / UBytes, getPointerTy());
4930  BytesLeft = Size % UBytes;
4931
4932  SDOperand InFlag(0, 0);
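  // Set up the rep;movs: element count in [ER]CX, destination in [ER]DI,
  // source in [ER]SI. The remaining Size % UBytes tail bytes are copied
  // below with explicit loads and stores.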
4933  Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
4934                            Count, InFlag);
4935  InFlag = Chain.getValue(1);
4936  Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
4937                            Dest, InFlag);
4938  InFlag = Chain.getValue(1);
4939  Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI,
4940                            Source, InFlag);
4941  InFlag = Chain.getValue(1);
4942
4943  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
4944  SmallVector<SDOperand, 8> Ops;
4945  Ops.push_back(Chain);
4946  Ops.push_back(DAG.getValueType(AVT));
4947  Ops.push_back(InFlag);
4948  Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size());
4949
4950  if (BytesLeft) {
4951    // Issue loads and stores for the last 1 - 7 bytes.
4952    unsigned Offset = Size - BytesLeft;
4953    SDOperand DstAddr = Dest;
4954    MVT::ValueType DstVT = DstAddr.getValueType();
4955    SDOperand SrcAddr = Source;
4956    MVT::ValueType SrcVT = SrcAddr.getValueType();
4957    SDOperand Value;
4958    if (BytesLeft >= 4) {
4959      Value = DAG.getLoad(MVT::i32, Chain,
4960                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
4961                                      DAG.getConstant(Offset, SrcVT)),
4962                          NULL, 0);
4963      Chain = Value.getValue(1);
4964      Chain = DAG.getStore(Chain, Value,
4965                           DAG.getNode(ISD::ADD, DstVT, DstAddr,
4966                                       DAG.getConstant(Offset, DstVT)),
4967                           NULL, 0);
4968      BytesLeft -= 4;
4969      Offset += 4;
4970    }
4971    if (BytesLeft >= 2) {
4972      Value = DAG.getLoad(MVT::i16, Chain,
4973                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
4974                                      DAG.getConstant(Offset, SrcVT)),
4975                          NULL, 0);
4976      Chain = Value.getValue(1);
4977      Chain = DAG.getStore(Chain, Value,
4978                           DAG.getNode(ISD::ADD, DstVT, DstAddr,
4979                                       DAG.getConstant(Offset, DstVT)),
4980                           NULL, 0);
4981      BytesLeft -= 2;
4982      Offset += 2;
4983    }
4984
4985    if (BytesLeft == 1) {
4986      Value = DAG.getLoad(MVT::i8, Chain,
4987                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
4988                                      DAG.getConstant(Offset, SrcVT)),
4989                          NULL, 0);
4990      Chain = Value.getValue(1);
4991      Chain = DAG.getStore(Chain, Value,
4992                           DAG.getNode(ISD::ADD, DstVT, DstAddr,
4993                                       DAG.getConstant(Offset, DstVT)),
4994                           NULL, 0);
4995    }
4996  }
4997
4998  return Chain;
4999}
5000
5001/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain
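/// RDTSC returns the counter split across EDX:EAX (RDX:RAX on x86-64); the
/// two halves are read via CopyFromReg and recombined, with SHL+OR on x86-64
/// and BUILD_PAIR on ia32.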
5002SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){
5003  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
5004  SDOperand TheChain = N->getOperand(0);
5005  SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1);
5006  if (Subtarget->is64Bit()) {
5007    SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1));
5008    SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX,
5009                                       MVT::i64, rax.getValue(2));
5010    SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx,
5011                                DAG.getConstant(32, MVT::i8));
5012    SDOperand Ops[] = {
5013      DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1)
5014    };
5015
5016    Tys = DAG.getVTList(MVT::i64, MVT::Other);
5017    return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val;
5018  }
5019
5020  SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1));
5021  SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX,
5022                                       MVT::i32, eax.getValue(2));
5023  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
5024  SDOperand Ops[] = { eax, edx };
5025  Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2);
5026
5027  // Use a MERGE_VALUES to return the value and chain.
5028  Ops[1] = edx.getValue(1);
5029  Tys = DAG.getVTList(MVT::i64, MVT::Other);
5030  return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2).Val;
5031}
5032
5033SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) {
5034  SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
5035
5036  if (!Subtarget->is64Bit()) {
5037    // vastart just stores the address of the VarArgsFrameIndex slot into the
5038    // memory location argument.
5039    SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
5040    return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV->getValue(),
5041                        SV->getOffset());
5042  }
5043
5044  // __va_list_tag:
5045  //   gp_offset         (0 - 6 * 8)
5046  //   fp_offset         (48 - 48 + 8 * 16)
5047  //   overflow_arg_area (points to parameters passed in memory).
5048  //   reg_save_area
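  // This matches the SysV x86-64 va_list layout
  //   struct { i32 gp_offset; i32 fp_offset; i8* overflow_arg_area;
  //            i8* reg_save_area; };
  // so the four stores below go to offsets 0, 4, 8 and 16.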
5049  SmallVector<SDOperand, 8> MemOps;
5050  SDOperand FIN = Op.getOperand(1);
5051  // Store gp_offset
5052  SDOperand Store = DAG.getStore(Op.getOperand(0),
5053                                 DAG.getConstant(VarArgsGPOffset, MVT::i32),
5054                                 FIN, SV->getValue(), SV->getOffset());
5055  MemOps.push_back(Store);
5056
5057  // Store fp_offset
5058  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
5059                    DAG.getConstant(4, getPointerTy()));
5060  Store = DAG.getStore(Op.getOperand(0),
5061                       DAG.getConstant(VarArgsFPOffset, MVT::i32),
5062                       FIN, SV->getValue(), SV->getOffset());
5063  MemOps.push_back(Store);
5064
5065  // Store ptr to overflow_arg_area
5066  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
5067                    DAG.getConstant(4, getPointerTy()));
5068  SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
5069  Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV->getValue(),
5070                       SV->getOffset());
5071  MemOps.push_back(Store);
5072
5073  // Store ptr to reg_save_area.
5074  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
5075                    DAG.getConstant(8, getPointerTy()));
5076  SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
5077  Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV->getValue(),
5078                       SV->getOffset());
5079  MemOps.push_back(Store);
5080  return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size());
5081}
5082
5083SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) {
5084  // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
5085  SDOperand Chain = Op.getOperand(0);
5086  SDOperand DstPtr = Op.getOperand(1);
5087  SDOperand SrcPtr = Op.getOperand(2);
5088  SrcValueSDNode *DstSV = cast<SrcValueSDNode>(Op.getOperand(3));
5089  SrcValueSDNode *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4));
5090
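  // The va_list struct is 24 bytes (4 + 4 + 8 + 8), so copy it as three i64
  // chunks, bumping both pointers by 8 between iterations.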
5091  SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr,
5092                       SrcSV->getValue(), SrcSV->getOffset());
5093  Chain = SrcPtr.getValue(1);
5094  for (unsigned i = 0; i < 3; ++i) {
5095    SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr,
5096                                SrcSV->getValue(), SrcSV->getOffset());
5097    Chain = Val.getValue(1);
5098    Chain = DAG.getStore(Chain, Val, DstPtr,
5099                         DstSV->getValue(), DstSV->getOffset());
5100    if (i == 2)
5101      break;
5102    SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr,
5103                         DAG.getConstant(8, getPointerTy()));
5104    DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr,
5105                         DAG.getConstant(8, getPointerTy()));
5106  }
5107  return Chain;
5108}
5109
5110SDOperand
5111X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
5112  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue();
5113  switch (IntNo) {
5114  default: return SDOperand();    // Don't custom lower most intrinsics.
5115    // Comparison intrinsics.
5116  case Intrinsic::x86_sse_comieq_ss:
5117  case Intrinsic::x86_sse_comilt_ss:
5118  case Intrinsic::x86_sse_comile_ss:
5119  case Intrinsic::x86_sse_comigt_ss:
5120  case Intrinsic::x86_sse_comige_ss:
5121  case Intrinsic::x86_sse_comineq_ss:
5122  case Intrinsic::x86_sse_ucomieq_ss:
5123  case Intrinsic::x86_sse_ucomilt_ss:
5124  case Intrinsic::x86_sse_ucomile_ss:
5125  case Intrinsic::x86_sse_ucomigt_ss:
5126  case Intrinsic::x86_sse_ucomige_ss:
5127  case Intrinsic::x86_sse_ucomineq_ss:
5128  case Intrinsic::x86_sse2_comieq_sd:
5129  case Intrinsic::x86_sse2_comilt_sd:
5130  case Intrinsic::x86_sse2_comile_sd:
5131  case Intrinsic::x86_sse2_comigt_sd:
5132  case Intrinsic::x86_sse2_comige_sd:
5133  case Intrinsic::x86_sse2_comineq_sd:
5134  case Intrinsic::x86_sse2_ucomieq_sd:
5135  case Intrinsic::x86_sse2_ucomilt_sd:
5136  case Intrinsic::x86_sse2_ucomile_sd:
5137  case Intrinsic::x86_sse2_ucomigt_sd:
5138  case Intrinsic::x86_sse2_ucomige_sd:
5139  case Intrinsic::x86_sse2_ucomineq_sd: {
5140    unsigned Opc = 0;
5141    ISD::CondCode CC = ISD::SETCC_INVALID;
5142    switch (IntNo) {
5143    default: break;
5144    case Intrinsic::x86_sse_comieq_ss:
5145    case Intrinsic::x86_sse2_comieq_sd:
5146      Opc = X86ISD::COMI;
5147      CC = ISD::SETEQ;
5148      break;
5149    case Intrinsic::x86_sse_comilt_ss:
5150    case Intrinsic::x86_sse2_comilt_sd:
5151      Opc = X86ISD::COMI;
5152      CC = ISD::SETLT;
5153      break;
5154    case Intrinsic::x86_sse_comile_ss:
5155    case Intrinsic::x86_sse2_comile_sd:
5156      Opc = X86ISD::COMI;
5157      CC = ISD::SETLE;
5158      break;
5159    case Intrinsic::x86_sse_comigt_ss:
5160    case Intrinsic::x86_sse2_comigt_sd:
5161      Opc = X86ISD::COMI;
5162      CC = ISD::SETGT;
5163      break;
5164    case Intrinsic::x86_sse_comige_ss:
5165    case Intrinsic::x86_sse2_comige_sd:
5166      Opc = X86ISD::COMI;
5167      CC = ISD::SETGE;
5168      break;
5169    case Intrinsic::x86_sse_comineq_ss:
5170    case Intrinsic::x86_sse2_comineq_sd:
5171      Opc = X86ISD::COMI;
5172      CC = ISD::SETNE;
5173      break;
5174    case Intrinsic::x86_sse_ucomieq_ss:
5175    case Intrinsic::x86_sse2_ucomieq_sd:
5176      Opc = X86ISD::UCOMI;
5177      CC = ISD::SETEQ;
5178      break;
5179    case Intrinsic::x86_sse_ucomilt_ss:
5180    case Intrinsic::x86_sse2_ucomilt_sd:
5181      Opc = X86ISD::UCOMI;
5182      CC = ISD::SETLT;
5183      break;
5184    case Intrinsic::x86_sse_ucomile_ss:
5185    case Intrinsic::x86_sse2_ucomile_sd:
5186      Opc = X86ISD::UCOMI;
5187      CC = ISD::SETLE;
5188      break;
5189    case Intrinsic::x86_sse_ucomigt_ss:
5190    case Intrinsic::x86_sse2_ucomigt_sd:
5191      Opc = X86ISD::UCOMI;
5192      CC = ISD::SETGT;
5193      break;
5194    case Intrinsic::x86_sse_ucomige_ss:
5195    case Intrinsic::x86_sse2_ucomige_sd:
5196      Opc = X86ISD::UCOMI;
5197      CC = ISD::SETGE;
5198      break;
5199    case Intrinsic::x86_sse_ucomineq_ss:
5200    case Intrinsic::x86_sse2_ucomineq_sd:
5201      Opc = X86ISD::UCOMI;
5202      CC = ISD::SETNE;
5203      break;
5204    }
5205
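    // Emit the COMI/UCOMI compare, materialize the requested flag as an i8
    // X86ISD::SETCC, and any-extend it to the i32 the intrinsic returns.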
5206    unsigned X86CC;
5207    SDOperand LHS = Op.getOperand(1);
5208    SDOperand RHS = Op.getOperand(2);
5209    translateX86CC(CC, true, X86CC, LHS, RHS, DAG);
5210
5211    SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS);
5212    SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8,
5213                                  DAG.getConstant(X86CC, MVT::i8), Cond);
5214    return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
5215  }
5216  }
5217}
5218
5219SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) {
5220  // Depths > 0 not supported yet!
5221  if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
5222    return SDOperand();
5223
5224  // Just load the return address
5225  SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
5226  return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0);
5227}
5228
5229SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) {
5230  // Depths > 0 not supported yet!
5231  if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
5232    return SDOperand();
5233
5234  SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
5235  return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI,
5236                     DAG.getConstant(4, getPointerTy()));
5237}
5238
5239SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op,
5240                                                       SelectionDAG &DAG) {
5241  // Not yet supported on x86-64.
5242  if (Subtarget->is64Bit())
5243    return SDOperand();
5244
5245  return DAG.getConstant(8, getPointerTy());
5246}
5247
5248SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG)
5249{
5250  assert(!Subtarget->is64Bit() &&
5251         "Lowering of eh_return builtin is not supported yet on x86-64");
5252
5253  MachineFunction &MF = DAG.getMachineFunction();
5254  SDOperand Chain     = Op.getOperand(0);
5255  SDOperand Offset    = Op.getOperand(1);
5256  SDOperand Handler   = Op.getOperand(2);
5257
5258  SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF),
5259                                    getPointerTy());
5260
5261  SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame,
5262                                    DAG.getConstant(-4UL, getPointerTy()));
5263  StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset);
5264  Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0);
5265  Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr);
5266  MF.addLiveOut(X86::ECX);
5267
5268  return DAG.getNode(X86ISD::EH_RETURN, MVT::Other,
5269                     Chain, DAG.getRegister(X86::ECX, getPointerTy()));
5270}
5271
5272SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op,
5273                                             SelectionDAG &DAG) {
5274  SDOperand Root = Op.getOperand(0);
5275  SDOperand Trmp = Op.getOperand(1); // trampoline
5276  SDOperand FPtr = Op.getOperand(2); // nested function
5277  SDOperand Nest = Op.getOperand(3); // 'nest' parameter value
5278
5279  SrcValueSDNode *TrmpSV = cast<SrcValueSDNode>(Op.getOperand(4));
5280
5281  if (Subtarget->is64Bit()) {
5282    return SDOperand(); // not yet supported
5283  } else {
5284    Function *Func = (Function *)
5285      cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
5286    unsigned CC = Func->getCallingConv();
5287    unsigned NestReg;
5288
5289    switch (CC) {
5290    default:
5291      assert(0 && "Unsupported calling convention");
5292    case CallingConv::C:
5293    case CallingConv::X86_StdCall: {
5294      // Pass 'nest' parameter in ECX.
5295      // Must be kept in sync with X86CallingConv.td
5296      NestReg = X86::ECX;
5297
5298      // Check that ECX wasn't needed by an 'inreg' parameter.
5299      const FunctionType *FTy = Func->getFunctionType();
5300      const ParamAttrsList *Attrs = Func->getParamAttrs();
5301
5302      if (Attrs && !Func->isVarArg()) {
5303        unsigned InRegCount = 0;
5304        unsigned Idx = 1;
5305
5306        for (FunctionType::param_iterator I = FTy->param_begin(),
5307             E = FTy->param_end(); I != E; ++I, ++Idx)
5308          if (Attrs->paramHasAttr(Idx, ParamAttr::InReg))
5309            // FIXME: should only count parameters that are lowered to integers.
5310            InRegCount += (getTargetData()->getTypeSizeInBits(*I) + 31) / 32;
5311
5312        if (InRegCount > 2) {
5313          cerr << "Nest register in use - reduce number of inreg parameters!\n";
5314          abort();
5315        }
5316      }
5317      break;
5318    }
5319    case CallingConv::X86_FastCall:
5320      // Pass 'nest' parameter in EAX.
5321      // Must be kept in sync with X86CallingConv.td
5322      NestReg = X86::EAX;
5323      break;
5324    }
5325
5326    const X86InstrInfo *TII =
5327      ((X86TargetMachine&)getTargetMachine()).getInstrInfo();
5328
5329    SDOperand OutChains[4];
5330    SDOperand Addr, Disp;
5331
5332    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32));
5333    Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr);
5334
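    // The 10-byte 32-bit trampoline written below is, roughly:
    //   offset 0: one-byte 'mov r32, imm32' opcode or'd with the nest register
    //   offset 1: the 32-bit 'nest' value
    //   offset 5: one-byte 'jmp rel32' opcode
    //   offset 6: the 32-bit displacement to the nested function
    // Disp above is therefore computed relative to the end of the jmp (Trmp+10).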
5335    unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
5336    unsigned char N86Reg  = ((X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg);
5337    OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
5338                                Trmp, TrmpSV->getValue(), TrmpSV->getOffset());
5339
5340    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32));
5341    OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpSV->getValue(),
5342                                TrmpSV->getOffset() + 1, false, 1);
5343
5344    unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
5345    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32));
5346    OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr,
5347                                TrmpSV->getValue(), TrmpSV->getOffset() + 5);
5348
5349    Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32));
5350    OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpSV->getValue(),
5351                                TrmpSV->getOffset() + 6, false, 1);
5352
5353    SDOperand Ops[] =
5354      { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) };
5355    return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2);
5356  }
5357}
5358
5359SDOperand X86TargetLowering::LowerFLT_ROUNDS(SDOperand Op, SelectionDAG &DAG) {
5360  /*
5361   The rounding mode is in bits 11:10 of the FP control word, and has the following
5362   settings:
5363     00 Round to nearest
5364     01 Round to -inf
5365     10 Round to +inf
5366     11 Round to 0
5367
5368  FLT_ROUNDS, on the other hand, expects the following:
5369    -1 Undefined
5370     0 Round to 0
5371     1 Round to nearest
5372     2 Round to +inf
5373     3 Round to -inf
5374
5375  To perform the conversion, we do:
5376    (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
5377  */
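  // For example (FPCW rounding-control bits -> FLT_ROUNDS value):
  //   00 (nearest)     -> ((0|0)+1)&3 = 1
  //   01 (toward -inf) -> ((0|2)+1)&3 = 3
  //   10 (toward +inf) -> ((1|0)+1)&3 = 2
  //   11 (toward zero) -> ((1|2)+1)&3 = 0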
5378
5379  MachineFunction &MF = DAG.getMachineFunction();
5380  const TargetMachine &TM = MF.getTarget();
5381  const TargetFrameInfo &TFI = *TM.getFrameInfo();
5382  unsigned StackAlignment = TFI.getStackAlignment();
5383  MVT::ValueType VT = Op.getValueType();
5384
5385  // Save FP Control Word to stack slot
5386  int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment);
5387  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
5388
5389  SDOperand Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other,
5390                                DAG.getEntryNode(), StackSlot);
5391
5392  // Load FP Control Word from stack slot
5393  SDOperand CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0);
5394
5395  // Transform as necessary
5396  SDOperand CWD1 =
5397    DAG.getNode(ISD::SRL, MVT::i16,
5398                DAG.getNode(ISD::AND, MVT::i16,
5399                            CWD, DAG.getConstant(0x800, MVT::i16)),
5400                DAG.getConstant(11, MVT::i8));
5401  SDOperand CWD2 =
5402    DAG.getNode(ISD::SRL, MVT::i16,
5403                DAG.getNode(ISD::AND, MVT::i16,
5404                            CWD, DAG.getConstant(0x400, MVT::i16)),
5405                DAG.getConstant(9, MVT::i8));
5406
5407  SDOperand RetVal =
5408    DAG.getNode(ISD::AND, MVT::i16,
5409                DAG.getNode(ISD::ADD, MVT::i16,
5410                            DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2),
5411                            DAG.getConstant(1, MVT::i16)),
5412                DAG.getConstant(3, MVT::i16));
5413
5414
5415  return DAG.getNode((MVT::getSizeInBits(VT) < 16 ?
5416                      ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal);
5417}
5418
5419SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) {
5420  MVT::ValueType VT = Op.getValueType();
5421  MVT::ValueType OpVT = VT;
5422  unsigned NumBits = MVT::getSizeInBits(VT);
5423
5424  Op = Op.getOperand(0);
5425  if (VT == MVT::i8) {
5426    // Zero extend to i32 since there is not an i8 bsr.
5427    OpVT = MVT::i32;
5428    Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op);
5429  }
5430
5431  // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
5432  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
5433  Op = DAG.getNode(X86ISD::BSR, VTs, Op);
5434
5435  // If src is zero (i.e. bsr sets ZF), select 2*NumBits-1; the xor below maps it to NumBits.
5436  SmallVector<SDOperand, 4> Ops;
5437  Ops.push_back(Op);
5438  Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT));
5439  Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8));
5440  Ops.push_back(Op.getValue(1));
5441  Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4);
5442
5443  // Finally xor with NumBits-1.
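  // For example, for an i32 input of 0x00001000 bsr yields 12 and 12^31 = 19,
  // the number of leading zeros; for a zero input the cmov gave 63 and 63^31 = 32.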
5444  Op = DAG.getNode(ISD::XOR, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
5445
5446  if (VT == MVT::i8)
5447    Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op);
5448  return Op;
5449}
5450
5451SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) {
5452  MVT::ValueType VT = Op.getValueType();
5453  MVT::ValueType OpVT = VT;
5454  unsigned NumBits = MVT::getSizeInBits(VT);
5455
5456  Op = Op.getOperand(0);
5457  if (VT == MVT::i8) {
5458    OpVT = MVT::i32;
5459    Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op);
5460  }
5461
5462  // Issue a bsf (scan bits forward) which also sets EFLAGS.
5463  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
5464  Op = DAG.getNode(X86ISD::BSF, VTs, Op);
5465
5466  // If src is zero (i.e. bsf sets ZF), returns NumBits.
5467  SmallVector<SDOperand, 4> Ops;
5468  Ops.push_back(Op);
5469  Ops.push_back(DAG.getConstant(NumBits, OpVT));
5470  Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8));
5471  Ops.push_back(Op.getValue(1));
5472  Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4);
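  // Unlike CTLZ, bsf directly produces the trailing-zero count, so no further
  // adjustment is needed beyond the zero-input cmov above.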
5473
5474  if (VT == MVT::i8)
5475    Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op);
5476  return Op;
5477}
5478
5479/// LowerOperation - Provide custom lowering hooks for some operations.
5480///
5481SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
5482  switch (Op.getOpcode()) {
5483  default: assert(0 && "Should not custom lower this!");
5484  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
5485  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
5486  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
5487  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
5488  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
5489  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
5490  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
5491  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
5492  case ISD::ExternalSymbol:     return LowerExternalSymbol(Op, DAG);
5493  case ISD::SHL_PARTS:
5494  case ISD::SRA_PARTS:
5495  case ISD::SRL_PARTS:          return LowerShift(Op, DAG);
5496  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
5497  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG);
5498  case ISD::FABS:               return LowerFABS(Op, DAG);
5499  case ISD::FNEG:               return LowerFNEG(Op, DAG);
5500  case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
5501  case ISD::SETCC:              return LowerSETCC(Op, DAG);
5502  case ISD::SELECT:             return LowerSELECT(Op, DAG);
5503  case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
5504  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
5505  case ISD::CALL:               return LowerCALL(Op, DAG);
5506  case ISD::RET:                return LowerRET(Op, DAG);
5507  case ISD::FORMAL_ARGUMENTS:   return LowerFORMAL_ARGUMENTS(Op, DAG);
5508  case ISD::MEMSET:             return LowerMEMSET(Op, DAG);
5509  case ISD::MEMCPY:             return LowerMEMCPY(Op, DAG);
5510  case ISD::VASTART:            return LowerVASTART(Op, DAG);
5511  case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
5512  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
5513  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
5514  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
5515  case ISD::FRAME_TO_ARGS_OFFSET:
5516                                return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
5517  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
5518  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
5519  case ISD::TRAMPOLINE:         return LowerTRAMPOLINE(Op, DAG);
5520  case ISD::FLT_ROUNDS:         return LowerFLT_ROUNDS(Op, DAG);
5521  case ISD::CTLZ:               return LowerCTLZ(Op, DAG);
5522  case ISD::CTTZ:               return LowerCTTZ(Op, DAG);
5523
5524  // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands.
5525  case ISD::READCYCLECOUNTER:
5526    return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0);
5527  }
5528}
5529
5530/// ExpandOperation - Provide custom lowering hooks for expanding operations.
5531SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
5532  switch (N->getOpcode()) {
5533  default: assert(0 && "Should not custom lower this!");
5534  case ISD::FP_TO_SINT:         return ExpandFP_TO_SINT(N, DAG);
5535  case ISD::READCYCLECOUNTER:   return ExpandREADCYCLECOUNTER(N, DAG);
5536  }
5537}
5538
5539const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
5540  switch (Opcode) {
5541  default: return NULL;
5542  case X86ISD::BSF:                return "X86ISD::BSF";
5543  case X86ISD::BSR:                return "X86ISD::BSR";
5544  case X86ISD::SHLD:               return "X86ISD::SHLD";
5545  case X86ISD::SHRD:               return "X86ISD::SHRD";
5546  case X86ISD::FAND:               return "X86ISD::FAND";
5547  case X86ISD::FOR:                return "X86ISD::FOR";
5548  case X86ISD::FXOR:               return "X86ISD::FXOR";
5549  case X86ISD::FSRL:               return "X86ISD::FSRL";
5550  case X86ISD::FILD:               return "X86ISD::FILD";
5551  case X86ISD::FILD_FLAG:          return "X86ISD::FILD_FLAG";
5552  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
5553  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
5554  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
5555  case X86ISD::FLD:                return "X86ISD::FLD";
5556  case X86ISD::FST:                return "X86ISD::FST";
5557  case X86ISD::FP_GET_RESULT:      return "X86ISD::FP_GET_RESULT";
5558  case X86ISD::FP_SET_RESULT:      return "X86ISD::FP_SET_RESULT";
5559  case X86ISD::CALL:               return "X86ISD::CALL";
5560  case X86ISD::TAILCALL:           return "X86ISD::TAILCALL";
5561  case X86ISD::RDTSC_DAG:          return "X86ISD::RDTSC_DAG";
5562  case X86ISD::CMP:                return "X86ISD::CMP";
5563  case X86ISD::COMI:               return "X86ISD::COMI";
5564  case X86ISD::UCOMI:              return "X86ISD::UCOMI";
5565  case X86ISD::SETCC:              return "X86ISD::SETCC";
5566  case X86ISD::CMOV:               return "X86ISD::CMOV";
5567  case X86ISD::BRCOND:             return "X86ISD::BRCOND";
5568  case X86ISD::RET_FLAG:           return "X86ISD::RET_FLAG";
5569  case X86ISD::REP_STOS:           return "X86ISD::REP_STOS";
5570  case X86ISD::REP_MOVS:           return "X86ISD::REP_MOVS";
5571  case X86ISD::GlobalBaseReg:      return "X86ISD::GlobalBaseReg";
5572  case X86ISD::Wrapper:            return "X86ISD::Wrapper";
5573  case X86ISD::S2VEC:              return "X86ISD::S2VEC";
5574  case X86ISD::PEXTRW:             return "X86ISD::PEXTRW";
5575  case X86ISD::PINSRW:             return "X86ISD::PINSRW";
5576  case X86ISD::FMAX:               return "X86ISD::FMAX";
5577  case X86ISD::FMIN:               return "X86ISD::FMIN";
5578  case X86ISD::FRSQRT:             return "X86ISD::FRSQRT";
5579  case X86ISD::FRCP:               return "X86ISD::FRCP";
5580  case X86ISD::TLSADDR:            return "X86ISD::TLSADDR";
5581  case X86ISD::THREAD_POINTER:     return "X86ISD::THREAD_POINTER";
5582  case X86ISD::EH_RETURN:          return "X86ISD::EH_RETURN";
5583  case X86ISD::TC_RETURN:          return "X86ISD::TC_RETURN";
5584  case X86ISD::FNSTCW16m:          return "X86ISD::FNSTCW16m";
5585  }
5586}
5587
5588// isLegalAddressingMode - Return true if the addressing mode represented
5589// by AM is legal for this target, for a load/store of the specified type.
5590bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
5591                                              const Type *Ty) const {
5592  // X86 supports extremely general addressing modes.
5593
5594  // X86 allows a sign-extended 32-bit immediate field as a displacement.
5595  if (AM.BaseOffs < -(1LL << 31) || AM.BaseOffs > (1LL << 31)-1)
5596    return false;
5597
5598  if (AM.BaseGV) {
5599    // We can only fold this if we don't need an extra load.
5600    if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false))
5601      return false;
5602
5603    // X86-64 only supports addr of globals in small code model.
5604    if (Subtarget->is64Bit()) {
5605      if (getTargetMachine().getCodeModel() != CodeModel::Small)
5606        return false;
5607      // If lower 4G is not available, then we must use rip-relative addressing.
5608      if (AM.BaseOffs || AM.Scale > 1)
5609        return false;
5610    }
5611  }
5612
5613  switch (AM.Scale) {
5614  case 0:
5615  case 1:
5616  case 2:
5617  case 4:
5618  case 8:
5619    // These scales always work.
5620    break;
5621  case 3:
5622  case 5:
5623  case 9:
5624    // These scales are formed with basereg+scalereg.  Only accept if there is
5625    // no basereg yet.
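    // (For example, a scale of 9 is encoded as Index + Index*8, so the base
    // register slot must still be free.)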
5626    if (AM.HasBaseReg)
5627      return false;
5628    break;
5629  default:  // Other stuff never works.
5630    return false;
5631  }
5632
5633  return true;
5634}
5635
5636
5637bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const {
5638  if (!Ty1->isInteger() || !Ty2->isInteger())
5639    return false;
5640  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
5641  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
5642  if (NumBits1 <= NumBits2)
5643    return false;
5644  return Subtarget->is64Bit() || NumBits1 < 64;
5645}
5646
5647bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1,
5648                                       MVT::ValueType VT2) const {
5649  if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2))
5650    return false;
5651  unsigned NumBits1 = MVT::getSizeInBits(VT1);
5652  unsigned NumBits2 = MVT::getSizeInBits(VT2);
5653  if (NumBits1 <= NumBits2)
5654    return false;
5655  return Subtarget->is64Bit() || NumBits1 < 64;
5656}
5657
5658/// isShuffleMaskLegal - Targets can use this to indicate that they only
5659/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
5660/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
5661/// are assumed to be legal.
5662bool
5663X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
5664  // Only do shuffles on 128-bit vector types for now.
5665  if (MVT::getSizeInBits(VT) == 64) return false;
5666  return (Mask.Val->getNumOperands() <= 4 ||
5667          isIdentityMask(Mask.Val) ||
5668          isIdentityMask(Mask.Val, true) ||
5669          isSplatMask(Mask.Val)  ||
5670          isPSHUFHW_PSHUFLWMask(Mask.Val) ||
5671          X86::isUNPCKLMask(Mask.Val) ||
5672          X86::isUNPCKHMask(Mask.Val) ||
5673          X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
5674          X86::isUNPCKH_v_undef_Mask(Mask.Val));
5675}
5676
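/// isVectorClearMaskLegal - Similar to isShuffleMaskLegal; returns true if the
/// shuffle described by the BUILD_VECTOR operands BVOps (with element type EVT)
/// is one this target can lower cheaply.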
5677bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
5678                                               MVT::ValueType EVT,
5679                                               SelectionDAG &DAG) const {
5680  unsigned NumElts = BVOps.size();
5681  // Only do shuffles on 128-bit vector types for now.
5682  if (MVT::getSizeInBits(EVT) * NumElts == 64) return false;
5683  if (NumElts == 2) return true;
5684  if (NumElts == 4) {
5685    return (isMOVLMask(&BVOps[0], 4)  ||
5686            isCommutedMOVL(&BVOps[0], 4, true) ||
5687            isSHUFPMask(&BVOps[0], 4) ||
5688            isCommutedSHUFP(&BVOps[0], 4));
5689  }
5690  return false;
5691}
5692
5693//===----------------------------------------------------------------------===//
5694//                           X86 Scheduler Hooks
5695//===----------------------------------------------------------------------===//
5696
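/// InsertAtEndOfBasicBlock - Expand pseudo instructions that require custom
/// basic-block insertion: the CMOV_* pseudos are expanded into a branch
/// diamond, and the FP*_TO_INT*_IN_MEM pseudos into an FP store performed with
/// the rounding mode temporarily forced to round-toward-zero.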
5697MachineBasicBlock *
5698X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
5699                                           MachineBasicBlock *BB) {
5700  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
5701  switch (MI->getOpcode()) {
5702  default: assert(false && "Unexpected instr type to insert");
5703  case X86::CMOV_FR32:
5704  case X86::CMOV_FR64:
5705  case X86::CMOV_V4F32:
5706  case X86::CMOV_V2F64:
5707  case X86::CMOV_V2I64: {
5708    // To "insert" a SELECT_CC instruction, we actually have to insert the
5709    // diamond control-flow pattern.  The incoming instruction knows the
5710    // destination vreg to set, the condition code register to branch on, the
5711    // true/false values to select between, and a branch opcode to use.
5712    const BasicBlock *LLVM_BB = BB->getBasicBlock();
5713    ilist<MachineBasicBlock>::iterator It = BB;
5714    ++It;
5715
5716    //  thisMBB:
5717    //  ...
5718    //   TrueVal = ...
5719    //   cmpTY ccX, r1, r2
5720    //   bCC sinkMBB
5721    //   fallthrough --> copy0MBB
5722    MachineBasicBlock *thisMBB = BB;
5723    MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
5724    MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
5725    unsigned Opc =
5726      X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
5727    BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB);
5728    MachineFunction *F = BB->getParent();
5729    F->getBasicBlockList().insert(It, copy0MBB);
5730    F->getBasicBlockList().insert(It, sinkMBB);
5731    // Update machine-CFG edges by first adding all successors of the current
5732    // block to the new block which will contain the Phi node for the select.
5733    for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
5734        e = BB->succ_end(); i != e; ++i)
5735      sinkMBB->addSuccessor(*i);
5736    // Next, remove all successors of the current block, and add the true
5737    // and fallthrough blocks as its successors.
5738    while(!BB->succ_empty())
5739      BB->removeSuccessor(BB->succ_begin());
5740    BB->addSuccessor(copy0MBB);
5741    BB->addSuccessor(sinkMBB);
5742
5743    //  copy0MBB:
5744    //   %FalseValue = ...
5745    //   # fallthrough to sinkMBB
5746    BB = copy0MBB;
5747
5748    // Update machine-CFG edges
5749    BB->addSuccessor(sinkMBB);
5750
5751    //  sinkMBB:
5752    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
5753    //  ...
5754    BB = sinkMBB;
5755    BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg())
5756      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
5757      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
5758
5759    delete MI;   // The pseudo instruction is gone now.
5760    return BB;
5761  }
5762
5763  case X86::FP32_TO_INT16_IN_MEM:
5764  case X86::FP32_TO_INT32_IN_MEM:
5765  case X86::FP32_TO_INT64_IN_MEM:
5766  case X86::FP64_TO_INT16_IN_MEM:
5767  case X86::FP64_TO_INT32_IN_MEM:
5768  case X86::FP64_TO_INT64_IN_MEM:
5769  case X86::FP80_TO_INT16_IN_MEM:
5770  case X86::FP80_TO_INT32_IN_MEM:
5771  case X86::FP80_TO_INT64_IN_MEM: {
5772    // Change the floating point control register to use "round towards zero"
5773    // mode when truncating to an integer value.
5774    MachineFunction *F = BB->getParent();
5775    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
5776    addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx);
5777
5778    // Load the old value of the control word...
5779    unsigned OldCW =
5780      F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass);
5781    addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx);
5782
5783    // Set the rounding mode to round toward zero...
5784    addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx)
5785      .addImm(0xC7F);
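    // (0xC7F = rounding-control bits 11:10 set to 11b, i.e. truncate toward
    // zero, with all floating-point exceptions masked.)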
5786
5787    // Reload the modified control word now...
5788    addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);
5789
5790    // Restore the memory image of control word to original value
5791    addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx)
5792      .addReg(OldCW);
5793
5794    // Get the X86 opcode to use.
5795    unsigned Opc;
5796    switch (MI->getOpcode()) {
5797    default: assert(0 && "illegal opcode!");
5798    case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
5799    case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
5800    case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
5801    case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
5802    case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
5803    case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
5804    case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
5805    case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
5806    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
5807    }
5808
5809    X86AddressMode AM;
5810    MachineOperand &Op = MI->getOperand(0);
5811    if (Op.isRegister()) {
5812      AM.BaseType = X86AddressMode::RegBase;
5813      AM.Base.Reg = Op.getReg();
5814    } else {
5815      AM.BaseType = X86AddressMode::FrameIndexBase;
5816      AM.Base.FrameIndex = Op.getFrameIndex();
5817    }
5818    Op = MI->getOperand(1);
5819    if (Op.isImmediate())
5820      AM.Scale = Op.getImm();
5821    Op = MI->getOperand(2);
5822    if (Op.isImmediate())
5823      AM.IndexReg = Op.getImm();
5824    Op = MI->getOperand(3);
5825    if (Op.isGlobalAddress()) {
5826      AM.GV = Op.getGlobal();
5827    } else {
5828      AM.Disp = Op.getImm();
5829    }
5830    addFullAddress(BuildMI(BB, TII->get(Opc)), AM)
5831                      .addReg(MI->getOperand(4).getReg());
5832
5833    // Reload the original control word now.
5834    addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);
5835
5836    delete MI;   // The pseudo instruction is gone now.
5837    return BB;
5838  }
5839  }
5840}
5841
5842//===----------------------------------------------------------------------===//
5843//                           X86 Optimization Hooks
5844//===----------------------------------------------------------------------===//
5845
5846void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
5847                                                       uint64_t Mask,
5848                                                       uint64_t &KnownZero,
5849                                                       uint64_t &KnownOne,
5850                                                       const SelectionDAG &DAG,
5851                                                       unsigned Depth) const {
5852  unsigned Opc = Op.getOpcode();
5853  assert((Opc >= ISD::BUILTIN_OP_END ||
5854          Opc == ISD::INTRINSIC_WO_CHAIN ||
5855          Opc == ISD::INTRINSIC_W_CHAIN ||
5856          Opc == ISD::INTRINSIC_VOID) &&
5857         "Should use MaskedValueIsZero if you don't know whether Op"
5858         " is a target node!");
5859
5860  KnownZero = KnownOne = 0;   // Don't know anything.
5861  switch (Opc) {
5862  default: break;
5863  case X86ISD::SETCC:
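    // X86ISD::SETCC produces 0 or 1, so every bit above bit 0 is known zero.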
5864    KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
5865    break;
5866  }
5867}
5868
5869/// getShuffleScalarElt - Returns the scalar element that will make up the ith
5870/// element of the result of the vector shuffle.
5871static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) {
5872  MVT::ValueType VT = N->getValueType(0);
5873  SDOperand PermMask = N->getOperand(2);
5874  unsigned NumElems = PermMask.getNumOperands();
5875  SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1);
5876  i %= NumElems;
5877  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
5878    return (i == 0)
5879     ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
5880  } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
5881    SDOperand Idx = PermMask.getOperand(i);
5882    if (Idx.getOpcode() == ISD::UNDEF)
5883      return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
5884    return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG);
5885  }
5886  return SDOperand();
5887}
5888
5889/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
5890/// node is a GlobalAddress + an offset.
5891static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
5892  unsigned Opc = N->getOpcode();
5893  if (Opc == X86ISD::Wrapper) {
5894    if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) {
5895      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
5896      return true;
5897    }
5898  } else if (Opc == ISD::ADD) {
5899    SDOperand N1 = N->getOperand(0);
5900    SDOperand N2 = N->getOperand(1);
5901    if (isGAPlusOffset(N1.Val, GA, Offset)) {
5902      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
5903      if (V) {
5904        Offset += V->getSignExtended();
5905        return true;
5906      }
5907    } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
5908      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
5909      if (V) {
5910        Offset += V->getSignExtended();
5911        return true;
5912      }
5913    }
5914  }
5915  return false;
5916}
5917
5918/// isConsecutiveLoad - Returns true if N is loading from an address of Base
5919/// + Dist * Size.
5920static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
5921                              MachineFrameInfo *MFI) {
5922  if (N->getOperand(0).Val != Base->getOperand(0).Val)
5923    return false;
5924
5925  SDOperand Loc = N->getOperand(1);
5926  SDOperand BaseLoc = Base->getOperand(1);
5927  if (Loc.getOpcode() == ISD::FrameIndex) {
5928    if (BaseLoc.getOpcode() != ISD::FrameIndex)
5929      return false;
5930    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
5931    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
5932    int FS  = MFI->getObjectSize(FI);
5933    int BFS = MFI->getObjectSize(BFI);
5934    if (FS != BFS || FS != Size) return false;
5935    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
5936  } else {
5937    GlobalValue *GV1 = NULL;
5938    GlobalValue *GV2 = NULL;
5939    int64_t Offset1 = 0;
5940    int64_t Offset2 = 0;
5941    bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
5942    bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
5943    if (isGA1 && isGA2 && GV1 == GV2)
5944      return Offset1 == (Offset2 + Dist*Size);
5945  }
5946
5947  return false;
5948}
5949
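/// isBaseAlignment16 - Return true if the base address of a load is known to be
/// 16-byte aligned: either a global with alignment >= 16 (plus a multiple-of-16
/// offset) or a stack object whose offset and alignment make it 16-byte aligned.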
5950static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
5951                              const X86Subtarget *Subtarget) {
5952  GlobalValue *GV = NULL;
5953  int64_t Offset = 0;   // isGAPlusOffset adds into Offset.
5954  if (isGAPlusOffset(Base, GV, Offset))
5955    return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
5956  else {
5957    assert(Base->getOpcode() == ISD::FrameIndex && "Unexpected base node!");
5958    int BFI = cast<FrameIndexSDNode>(Base)->getIndex();
5959    if (BFI < 0)
5960      // Fixed objects do not specify alignment, however the offsets are known.
5961      return ((Subtarget->getStackAlignment() % 16) == 0 &&
5962              (MFI->getObjectOffset(BFI) % 16) == 0);
5963    else
5964      return MFI->getObjectAlignment(BFI) >= 16;
5965  }
5966  return false;
5967}
5968
5969
5970/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
5971/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
5972/// if the load addresses are consecutive, non-overlapping, and in the right
5973/// order.
5974static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
5975                                       const X86Subtarget *Subtarget) {
5976  MachineFunction &MF = DAG.getMachineFunction();
5977  MachineFrameInfo *MFI = MF.getFrameInfo();
5978  MVT::ValueType VT = N->getValueType(0);
5979  MVT::ValueType EVT = MVT::getVectorElementType(VT);
5980  SDOperand PermMask = N->getOperand(2);
5981  int NumElems = (int)PermMask.getNumOperands();
5982  SDNode *Base = NULL;
5983  for (int i = 0; i < NumElems; ++i) {
5984    SDOperand Idx = PermMask.getOperand(i);
5985    if (Idx.getOpcode() == ISD::UNDEF) {
5986      if (!Base) return SDOperand();
5987    } else {
5988      SDOperand Arg =
5989        getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG);
5990      if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val))
5991        return SDOperand();
5992      if (!Base)
5993        Base = Arg.Val;
5994      else if (!isConsecutiveLoad(Arg.Val, Base,
5995                                  i, MVT::getSizeInBits(EVT)/8,MFI))
5996        return SDOperand();
5997    }
5998  }
5999
6000  bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget);
6001  LoadSDNode *LD = cast<LoadSDNode>(Base);
6002  if (isAlign16) {
6003    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
6004                       LD->getSrcValueOffset(), LD->isVolatile());
6005  } else {
6006    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
6007                       LD->getSrcValueOffset(), LD->isVolatile(),
6008                       LD->getAlignment());
6009  }
6010}
6011
6012/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
6013static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
6014                                      const X86Subtarget *Subtarget) {
6015  SDOperand Cond = N->getOperand(0);
6016
6017  // If we have SSE[12] support, try to form min/max nodes.
6018  if (Subtarget->hasSSE2() &&
6019      (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
6020    if (Cond.getOpcode() == ISD::SETCC) {
6021      // Get the LHS/RHS of the select.
6022      SDOperand LHS = N->getOperand(1);
6023      SDOperand RHS = N->getOperand(2);
6024      ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
6025
6026      unsigned Opcode = 0;
6027      if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
6028        switch (CC) {
6029        default: break;
6030        case ISD::SETOLE: // (X <= Y) ? X : Y -> min
6031        case ISD::SETULE:
6032        case ISD::SETLE:
6033          if (!UnsafeFPMath) break;
6034          // FALL THROUGH.
6035        case ISD::SETOLT:  // (X olt/lt Y) ? X : Y -> min
6036        case ISD::SETLT:
6037          Opcode = X86ISD::FMIN;
6038          break;
6039
6040        case ISD::SETOGT: // (X > Y) ? X : Y -> max
6041        case ISD::SETUGT:
6042        case ISD::SETGT:
6043          if (!UnsafeFPMath) break;
6044          // FALL THROUGH.
6045        case ISD::SETUGE:  // (X uge/ge Y) ? X : Y -> max
6046        case ISD::SETGE:
6047          Opcode = X86ISD::FMAX;
6048          break;
6049        }
6050      } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
6051        switch (CC) {
6052        default: break;
6053        case ISD::SETOGT: // (X > Y) ? Y : X -> min
6054        case ISD::SETUGT:
6055        case ISD::SETGT:
6056          if (!UnsafeFPMath) break;
6057          // FALL THROUGH.
6058        case ISD::SETUGE:  // (X uge/ge Y) ? Y : X -> min
6059        case ISD::SETGE:
6060          Opcode = X86ISD::FMIN;
6061          break;
6062
6063        case ISD::SETOLE:   // (X <= Y) ? Y : X -> max
6064        case ISD::SETULE:
6065        case ISD::SETLE:
6066          if (!UnsafeFPMath) break;
6067          // FALL THROUGH.
6068        case ISD::SETOLT:   // (X olt/lt Y) ? Y : X -> max
6069        case ISD::SETLT:
6070          Opcode = X86ISD::FMAX;
6071          break;
6072        }
6073      }
6074
6075      if (Opcode)
6076        return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
6077    }
6078
6079  }
6080
6081  return SDOperand();
6082}
6083
6084
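/// PerformDAGCombine - Target-specific DAG combine hook; currently dispatches
/// VECTOR_SHUFFLE nodes to PerformShuffleCombine and SELECT nodes to
/// PerformSELECTCombine.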
6085SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
6086                                               DAGCombinerInfo &DCI) const {
6087  SelectionDAG &DAG = DCI.DAG;
6088  switch (N->getOpcode()) {
6089  default: break;
6090  case ISD::VECTOR_SHUFFLE:
6091    return PerformShuffleCombine(N, DAG, Subtarget);
6092  case ISD::SELECT:
6093    return PerformSELECTCombine(N, DAG, Subtarget);
6094  }
6095
6096  return SDOperand();
6097}
6098
6099//===----------------------------------------------------------------------===//
6100//                           X86 Inline Assembly Support
6101//===----------------------------------------------------------------------===//
6102
6103/// getConstraintType - Given a constraint letter, return the type of
6104/// constraint it is for this target.
6105X86TargetLowering::ConstraintType
6106X86TargetLowering::getConstraintType(const std::string &Constraint) const {
6107  if (Constraint.size() == 1) {
6108    switch (Constraint[0]) {
6109    case 'A':
6110    case 'r':
6111    case 'R':
6112    case 'l':
6113    case 'q':
6114    case 'Q':
6115    case 'x':
6116    case 'Y':
6117      return C_RegisterClass;
6118    default:
6119      break;
6120    }
6121  }
6122  return TargetLowering::getConstraintType(Constraint);
6123}
6124
6125/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
6126/// vector.  If it is invalid, don't add anything to Ops.
6127void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
6128                                                     char Constraint,
6129                                                     std::vector<SDOperand>&Ops,
6130                                                     SelectionDAG &DAG) {
6131  SDOperand Result(0, 0);
6132
6133  switch (Constraint) {
6134  default: break;
6135  case 'I':
6136    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
6137      if (C->getValue() <= 31) {
6138        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
6139        break;
6140      }
6141    }
6142    return;
6143  case 'N':
6144    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
6145      if (C->getValue() <= 255) {
6146        Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
6147        break;
6148      }
6149    }
6150    return;
6151  case 'i': {
6152    // Literal immediates are always ok.
6153    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
6154      Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
6155      break;
6156    }
6157
6158    // If we are in non-pic codegen mode, we allow the address of a global (with
6159    // an optional displacement) to be used with 'i'.
6160    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
6161    int64_t Offset = 0;
6162
6163    // Match either (GA), (GA+C) or (C+GA).
6164    if (GA) {
6165      Offset = GA->getOffset();
6166    } else if (Op.getOpcode() == ISD::ADD) {
6167      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6168      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
6169      if (C && GA) {
6170        Offset = GA->getOffset()+C->getValue();
6171      } else {
6172        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
6173        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
6174        if (C && GA)
6175          Offset = GA->getOffset()+C->getValue();
6176        else
6177          C = 0, GA = 0;
6178      }
6179    }
6180
6181    if (GA) {
6182      // If addressing this global requires a load (e.g. in PIC mode), we can't
6183      // match.
6184      if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
6185                                         false))
6186        return;
6187
6188      Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
6189                                      Offset);
6190      Result = Op;
6191      break;
6192    }
6193
6194    // Otherwise, not valid for this mode.
6195    return;
6196  }
6197  }
6198
6199  if (Result.Val) {
6200    Ops.push_back(Result);
6201    return;
6202  }
6203  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
6204}
6205
6206std::vector<unsigned> X86TargetLowering::
6207getRegClassForInlineAsmConstraint(const std::string &Constraint,
6208                                  MVT::ValueType VT) const {
6209  if (Constraint.size() == 1) {
6210    // FIXME: not handling fp-stack yet!
6211    switch (Constraint[0]) {      // GCC X86 Constraint Letters
6212    default: break;  // Unknown constraint letter
6213    case 'A':   // EAX/EDX
6214      if (VT == MVT::i32 || VT == MVT::i64)
6215        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
6216      break;
6217    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
6218    case 'Q':   // Q_REGS
6219      if (VT == MVT::i32)
6220        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
6221      else if (VT == MVT::i16)
6222        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
6223      else if (VT == MVT::i8)
6224        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
6225      else if (VT == MVT::i64)
6226        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
6227      break;
6228    }
6229  }
6230
6231  return std::vector<unsigned>();
6232}
6233
6234std::pair<unsigned, const TargetRegisterClass*>
6235X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
6236                                                MVT::ValueType VT) const {
6237  // First, see if this is a constraint that directly corresponds to an LLVM
6238  // register class.
6239  if (Constraint.size() == 1) {
6240    // GCC Constraint Letters
6241    switch (Constraint[0]) {
6242    default: break;
6243    case 'r':   // GENERAL_REGS
6244    case 'R':   // LEGACY_REGS
6245    case 'l':   // INDEX_REGS
6246      if (VT == MVT::i64 && Subtarget->is64Bit())
6247        return std::make_pair(0U, X86::GR64RegisterClass);
6248      if (VT == MVT::i32)
6249        return std::make_pair(0U, X86::GR32RegisterClass);
6250      else if (VT == MVT::i16)
6251        return std::make_pair(0U, X86::GR16RegisterClass);
6252      else if (VT == MVT::i8)
6253        return std::make_pair(0U, X86::GR8RegisterClass);
6254      break;
6255    case 'y':   // MMX_REGS if MMX allowed.
6256      if (!Subtarget->hasMMX()) break;
6257      return std::make_pair(0U, X86::VR64RegisterClass);
6259    case 'Y':   // SSE_REGS if SSE2 allowed
6260      if (!Subtarget->hasSSE2()) break;
6261      // FALL THROUGH.
6262    case 'x':   // SSE_REGS if SSE1 allowed
6263      if (!Subtarget->hasSSE1()) break;
6264
6265      switch (VT) {
6266      default: break;
6267      // Scalar SSE types.
6268      case MVT::f32:
6269      case MVT::i32:
6270        return std::make_pair(0U, X86::FR32RegisterClass);
6271      case MVT::f64:
6272      case MVT::i64:
6273        return std::make_pair(0U, X86::FR64RegisterClass);
6274      // Vector types.
6275      case MVT::v16i8:
6276      case MVT::v8i16:
6277      case MVT::v4i32:
6278      case MVT::v2i64:
6279      case MVT::v4f32:
6280      case MVT::v2f64:
6281        return std::make_pair(0U, X86::VR128RegisterClass);
6282      }
6283      break;
6284    }
6285  }
6286
6287  // Use the default implementation in TargetLowering to convert the register
6288  // constraint into a member of a register class.
6289  std::pair<unsigned, const TargetRegisterClass*> Res;
6290  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
6291
6292  // Not found as a standard register?
6293  if (Res.second == 0) {
6294    // GCC calls "st(0)" just plain "st".
6295    if (StringsEqualNoCase("{st}", Constraint)) {
6296      Res.first = X86::ST0;
6297      Res.second = X86::RFP80RegisterClass;
6298    }
6299
6300    return Res;
6301  }
6302
6303  // Otherwise, check to see if this is a register class of the wrong value
6304  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it to
6305  // turn into {ax},{dx}.
6306  if (Res.second->hasType(VT))
6307    return Res;   // Correct type already, nothing to do.
6308
6309  // All of the single-register GCC register classes map their values onto
6310  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
6311  // really want an 8-bit or 32-bit register, map to the appropriate register
6312  // class and return the appropriate register.
6313  if (Res.second != X86::GR16RegisterClass)
6314    return Res;
6315
6316  if (VT == MVT::i8) {
6317    unsigned DestReg = 0;
6318    switch (Res.first) {
6319    default: break;
6320    case X86::AX: DestReg = X86::AL; break;
6321    case X86::DX: DestReg = X86::DL; break;
6322    case X86::CX: DestReg = X86::CL; break;
6323    case X86::BX: DestReg = X86::BL; break;
6324    }
6325    if (DestReg) {
6326      Res.first = DestReg;
6327      Res.second = X86::GR8RegisterClass;
6328    }
6329  } else if (VT == MVT::i32) {
6330    unsigned DestReg = 0;
6331    switch (Res.first) {
6332    default: break;
6333    case X86::AX: DestReg = X86::EAX; break;
6334    case X86::DX: DestReg = X86::EDX; break;
6335    case X86::CX: DestReg = X86::ECX; break;
6336    case X86::BX: DestReg = X86::EBX; break;
6337    case X86::SI: DestReg = X86::ESI; break;
6338    case X86::DI: DestReg = X86::EDI; break;
6339    case X86::BP: DestReg = X86::EBP; break;
6340    case X86::SP: DestReg = X86::ESP; break;
6341    }
6342    if (DestReg) {
6343      Res.first = DestReg;
6344      Res.second = X86::GR32RegisterClass;
6345    }
6346  } else if (VT == MVT::i64) {
6347    unsigned DestReg = 0;
6348    switch (Res.first) {
6349    default: break;
6350    case X86::AX: DestReg = X86::RAX; break;
6351    case X86::DX: DestReg = X86::RDX; break;
6352    case X86::CX: DestReg = X86::RCX; break;
6353    case X86::BX: DestReg = X86::RBX; break;
6354    case X86::SI: DestReg = X86::RSI; break;
6355    case X86::DI: DestReg = X86::RDI; break;
6356    case X86::BP: DestReg = X86::RBP; break;
6357    case X86::SP: DestReg = X86::RSP; break;
6358    }
6359    if (DestReg) {
6360      Res.first = DestReg;
6361      Res.second = X86::GR64RegisterClass;
6362    }
6363  }
6364
6365  return Res;
6366}
6367