X86ISelLowering.cpp revision 22aaf1d61c6a752d66f7ee10a7a5d99c7160e007
1//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file was developed by Chris Lattner and is distributed under
6// the University of Illinois Open Source License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the interfaces that X86 uses to lower LLVM code into a
11// selection DAG.
12//
13//===----------------------------------------------------------------------===//
14
15#include "X86.h"
16#include "X86InstrBuilder.h"
17#include "X86ISelLowering.h"
18#include "X86MachineFunctionInfo.h"
19#include "X86TargetMachine.h"
20#include "llvm/CallingConv.h"
21#include "llvm/Constants.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/Function.h"
24#include "llvm/Intrinsics.h"
25#include "llvm/ADT/VectorExtras.h"
26#include "llvm/Analysis/ScalarEvolutionExpressions.h"
27#include "llvm/CodeGen/MachineFrameInfo.h"
28#include "llvm/CodeGen/MachineFunction.h"
29#include "llvm/CodeGen/MachineInstrBuilder.h"
30#include "llvm/CodeGen/SelectionDAG.h"
31#include "llvm/CodeGen/SSARegMap.h"
32#include "llvm/Support/MathExtras.h"
33#include "llvm/Target/TargetOptions.h"
34#include "llvm/Support/CommandLine.h"
35#include "llvm/ADT/StringExtras.h"
36using namespace llvm;
37
38// FIXME: temporary.
39static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
40                                  cl::desc("Enable fastcc on X86"));
41X86TargetLowering::X86TargetLowering(TargetMachine &TM)
42  : TargetLowering(TM) {
43  Subtarget = &TM.getSubtarget<X86Subtarget>();
44  X86ScalarSSE = Subtarget->hasSSE2();
45  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
46
47  // Set up the TargetLowering object.
48
49  // X86 is weird, it always uses i8 for shift amounts and setcc results.
50  setShiftAmountType(MVT::i8);
51  setSetCCResultType(MVT::i8);
52  setSetCCResultContents(ZeroOrOneSetCCResult);
53  setSchedulingPreference(SchedulingForRegPressure);
54  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
55  setStackPointerRegisterToSaveRestore(X86StackPtr);
56
57  if (!Subtarget->isTargetDarwin())
58    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
59    setUseUnderscoreSetJmpLongJmp(true);
60
61  // Add legal addressing mode scale values.
62  addLegalAddressScale(8);
63  addLegalAddressScale(4);
64  addLegalAddressScale(2);
65  // Enter the ones which require both scale + index last. These are more
66  // expensive.
67  addLegalAddressScale(9);
68  addLegalAddressScale(5);
69  addLegalAddressScale(3);
70
71  // Set up the register classes.
72  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
73  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
74  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
75  if (Subtarget->is64Bit())
76    addRegisterClass(MVT::i64, X86::GR64RegisterClass);
77
78  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
79
80  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
81  // operation.
82  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
83  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
84  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);
85
86  if (Subtarget->is64Bit()) {
87    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Expand);
88    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);
89  } else {
90    if (X86ScalarSSE)
91      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
92      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Expand);
93    else
94      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Promote);
95  }
96
97  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
98  // this operation.
99  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
100  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);
101  // SSE has no i16 to fp conversion, only i32
102  if (X86ScalarSSE)
103    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
104  else {
105    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
106    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
107  }
108
109  if (!Subtarget->is64Bit()) {
110    // Custom lower SINT_TO_FP and FP_TO_SINT from/to i64 in 32-bit mode.
111    setOperationAction(ISD::SINT_TO_FP     , MVT::i64  , Custom);
112    setOperationAction(ISD::FP_TO_SINT     , MVT::i64  , Custom);
113  }
114
115  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
116  // this operation.
117  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
118  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);
119
120  if (X86ScalarSSE) {
121    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
122  } else {
123    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
124    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
125  }
126
127  // Handle FP_TO_UINT by promoting the destination to a larger signed
128  // conversion.
129  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
130  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
131  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);
132
133  if (Subtarget->is64Bit()) {
134    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Expand);
135    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
136  } else {
137    if (X86ScalarSSE && !Subtarget->hasSSE3())
138      // Expand FP_TO_UINT into a select.
139      // FIXME: We would like to use a Custom expander here eventually to do
140      // the optimal thing for SSE vs. the default expansion in the legalizer.
141      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Expand);
142    else
143      // With SSE3 we can use fisttpll to convert to a signed i64.
144      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Promote);
145  }
146
147  setOperationAction(ISD::BIT_CONVERT      , MVT::f32  , Expand);
148  setOperationAction(ISD::BIT_CONVERT      , MVT::i32  , Expand);
149
150  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
151  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
152  setOperationAction(ISD::BR_CC            , MVT::Other, Expand);
153  setOperationAction(ISD::SELECT_CC        , MVT::Other, Expand);
154  setOperationAction(ISD::MEMMOVE          , MVT::Other, Expand);
155  if (Subtarget->is64Bit())
156    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
157  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Expand);
158  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Expand);
159  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
160  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
161  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
162
163  setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
164  setOperationAction(ISD::CTTZ             , MVT::i8   , Expand);
165  setOperationAction(ISD::CTLZ             , MVT::i8   , Expand);
166  setOperationAction(ISD::CTPOP            , MVT::i16  , Expand);
167  setOperationAction(ISD::CTTZ             , MVT::i16  , Expand);
168  setOperationAction(ISD::CTLZ             , MVT::i16  , Expand);
169  setOperationAction(ISD::CTPOP            , MVT::i32  , Expand);
170  setOperationAction(ISD::CTTZ             , MVT::i32  , Expand);
171  setOperationAction(ISD::CTLZ             , MVT::i32  , Expand);
172  if (Subtarget->is64Bit()) {
173    setOperationAction(ISD::CTPOP          , MVT::i64  , Expand);
174    setOperationAction(ISD::CTTZ           , MVT::i64  , Expand);
175    setOperationAction(ISD::CTLZ           , MVT::i64  , Expand);
176  }
177
178  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
179  setOperationAction(ISD::BSWAP            , MVT::i16  , Expand);
180
181  // These should be promoted to a larger select which is supported.
182  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
183  setOperationAction(ISD::SELECT           , MVT::i8   , Promote);
184  // X86 wants to expand cmov itself.
185  setOperationAction(ISD::SELECT          , MVT::i16  , Custom);
186  setOperationAction(ISD::SELECT          , MVT::i32  , Custom);
187  setOperationAction(ISD::SELECT          , MVT::f32  , Custom);
188  setOperationAction(ISD::SELECT          , MVT::f64  , Custom);
189  setOperationAction(ISD::SETCC           , MVT::i8   , Custom);
190  setOperationAction(ISD::SETCC           , MVT::i16  , Custom);
191  setOperationAction(ISD::SETCC           , MVT::i32  , Custom);
192  setOperationAction(ISD::SETCC           , MVT::f32  , Custom);
193  setOperationAction(ISD::SETCC           , MVT::f64  , Custom);
194  if (Subtarget->is64Bit()) {
195    setOperationAction(ISD::SELECT        , MVT::i64  , Custom);
196    setOperationAction(ISD::SETCC         , MVT::i64  , Custom);
197  }
198  // X86 ret instruction may pop stack.
199  setOperationAction(ISD::RET             , MVT::Other, Custom);
200  // Darwin ABI issue.
201  setOperationAction(ISD::ConstantPool    , MVT::i32  , Custom);
202  setOperationAction(ISD::JumpTable       , MVT::i32  , Custom);
203  setOperationAction(ISD::GlobalAddress   , MVT::i32  , Custom);
204  setOperationAction(ISD::ExternalSymbol  , MVT::i32  , Custom);
205  if (Subtarget->is64Bit()) {
206    setOperationAction(ISD::ConstantPool  , MVT::i64  , Custom);
207    setOperationAction(ISD::JumpTable     , MVT::i64  , Custom);
208    setOperationAction(ISD::GlobalAddress , MVT::i64  , Custom);
209    setOperationAction(ISD::ExternalSymbol, MVT::i64  , Custom);
210  }
211  // 64-bit addm sub, shl, sra, srl (iff 32-bit x86)
212  setOperationAction(ISD::SHL_PARTS       , MVT::i32  , Custom);
213  setOperationAction(ISD::SRA_PARTS       , MVT::i32  , Custom);
214  setOperationAction(ISD::SRL_PARTS       , MVT::i32  , Custom);
215  // X86 wants to expand memset / memcpy itself.
216  setOperationAction(ISD::MEMSET          , MVT::Other, Custom);
217  setOperationAction(ISD::MEMCPY          , MVT::Other, Custom);
218
219  // We don't have line number support yet.
220  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
221  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
222  // FIXME - use subtarget debug flags
223  if (!Subtarget->isTargetDarwin() &&
224      !Subtarget->isTargetELF() &&
225      !Subtarget->isTargetCygwin())
226    setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);
227
228  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
229  setOperationAction(ISD::VASTART           , MVT::Other, Custom);
230
231  // Use the default implementation.
232  setOperationAction(ISD::VAARG             , MVT::Other, Expand);
233  setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
234  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
235  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
236  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
237  if (Subtarget->is64Bit())
238    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
239  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Expand);
240
241  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
242  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
243
244  if (X86ScalarSSE) {
245    // Set up the FP register classes.
246    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
247    addRegisterClass(MVT::f64, X86::FR64RegisterClass);
248
249    // Use ANDPD to simulate FABS.
250    setOperationAction(ISD::FABS , MVT::f64, Custom);
251    setOperationAction(ISD::FABS , MVT::f32, Custom);
252
253    // Use XORP to simulate FNEG.
254    setOperationAction(ISD::FNEG , MVT::f64, Custom);
255    setOperationAction(ISD::FNEG , MVT::f32, Custom);
256
257    // We don't support sin/cos/fmod
258    setOperationAction(ISD::FSIN , MVT::f64, Expand);
259    setOperationAction(ISD::FCOS , MVT::f64, Expand);
260    setOperationAction(ISD::FREM , MVT::f64, Expand);
261    setOperationAction(ISD::FSIN , MVT::f32, Expand);
262    setOperationAction(ISD::FCOS , MVT::f32, Expand);
263    setOperationAction(ISD::FREM , MVT::f32, Expand);
264
265    // Expand FP immediates into loads from the stack, except for the special
266    // cases we handle.
267    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
268    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
269    addLegalFPImmediate(+0.0); // xorps / xorpd
270  } else {
271    // Set up the FP register classes.
272    addRegisterClass(MVT::f64, X86::RFPRegisterClass);
273
274    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
275
276    if (!UnsafeFPMath) {
277      setOperationAction(ISD::FSIN           , MVT::f64  , Expand);
278      setOperationAction(ISD::FCOS           , MVT::f64  , Expand);
279    }
280
281    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
282    addLegalFPImmediate(+0.0); // FLD0
283    addLegalFPImmediate(+1.0); // FLD1
284    addLegalFPImmediate(-0.0); // FLD0/FCHS
285    addLegalFPImmediate(-1.0); // FLD1/FCHS
286  }
287
288  // First set operation action for all vector types to expand. Then we
289  // will selectively turn on ones that can be effectively codegen'd.
290  for (unsigned VT = (unsigned)MVT::Vector + 1;
291       VT != (unsigned)MVT::LAST_VALUETYPE; VT++) {
292    setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
293    setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
294    setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
295    setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
296    setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
297    setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
298    setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
299    setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
300    setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
301    setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
302    setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
303    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
304    setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Expand);
305    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
306    setOperationAction(ISD::INSERT_VECTOR_ELT,  (MVT::ValueType)VT, Expand);
307  }
308
309  if (Subtarget->hasMMX()) {
310    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
311    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
312    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
313
314    // FIXME: add MMX packed arithmetics
315    setOperationAction(ISD::BUILD_VECTOR,     MVT::v8i8,  Expand);
316    setOperationAction(ISD::BUILD_VECTOR,     MVT::v4i16, Expand);
317    setOperationAction(ISD::BUILD_VECTOR,     MVT::v2i32, Expand);
318  }
319
320  if (Subtarget->hasSSE1()) {
321    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);
322
323    setOperationAction(ISD::FADD,               MVT::v4f32, Legal);
324    setOperationAction(ISD::FSUB,               MVT::v4f32, Legal);
325    setOperationAction(ISD::FMUL,               MVT::v4f32, Legal);
326    setOperationAction(ISD::FDIV,               MVT::v4f32, Legal);
327    setOperationAction(ISD::LOAD,               MVT::v4f32, Legal);
328    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
329    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
330    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
331    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
332  }
333
334  if (Subtarget->hasSSE2()) {
335    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
336    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
337    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
338    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
339    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);
340
341    setOperationAction(ISD::ADD,                MVT::v16i8, Legal);
342    setOperationAction(ISD::ADD,                MVT::v8i16, Legal);
343    setOperationAction(ISD::ADD,                MVT::v4i32, Legal);
344    setOperationAction(ISD::SUB,                MVT::v16i8, Legal);
345    setOperationAction(ISD::SUB,                MVT::v8i16, Legal);
346    setOperationAction(ISD::SUB,                MVT::v4i32, Legal);
347    setOperationAction(ISD::MUL,                MVT::v8i16, Legal);
348    setOperationAction(ISD::FADD,               MVT::v2f64, Legal);
349    setOperationAction(ISD::FSUB,               MVT::v2f64, Legal);
350    setOperationAction(ISD::FMUL,               MVT::v2f64, Legal);
351    setOperationAction(ISD::FDIV,               MVT::v2f64, Legal);
352
353    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v16i8, Custom);
354    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8i16, Custom);
355    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
356    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
357    // Implement v4f32 insert_vector_elt in terms of SSE2 v8i16 ones.
358    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);
359
360    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
361    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
362      setOperationAction(ISD::BUILD_VECTOR,        (MVT::ValueType)VT, Custom);
363      setOperationAction(ISD::VECTOR_SHUFFLE,      (MVT::ValueType)VT, Custom);
364      setOperationAction(ISD::EXTRACT_VECTOR_ELT,  (MVT::ValueType)VT, Custom);
365    }
366    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
367    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
368    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
369    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
370    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
371    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
372
373    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
374    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
375      setOperationAction(ISD::AND,    (MVT::ValueType)VT, Promote);
376      AddPromotedToType (ISD::AND,    (MVT::ValueType)VT, MVT::v2i64);
377      setOperationAction(ISD::OR,     (MVT::ValueType)VT, Promote);
378      AddPromotedToType (ISD::OR,     (MVT::ValueType)VT, MVT::v2i64);
379      setOperationAction(ISD::XOR,    (MVT::ValueType)VT, Promote);
380      AddPromotedToType (ISD::XOR,    (MVT::ValueType)VT, MVT::v2i64);
381      setOperationAction(ISD::LOAD,   (MVT::ValueType)VT, Promote);
382      AddPromotedToType (ISD::LOAD,   (MVT::ValueType)VT, MVT::v2i64);
383      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
384      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
385    }
386
387    // Custom lower v2i64 and v2f64 selects.
388    setOperationAction(ISD::LOAD,               MVT::v2f64, Legal);
389    setOperationAction(ISD::LOAD,               MVT::v2i64, Legal);
390    setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
391    setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);
392  }
393
394  // We want to custom lower some of our intrinsics.
395  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
396
397  // We have target-specific dag combine patterns for the following nodes:
398  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
399  setTargetDAGCombine(ISD::SELECT);
400
401  computeRegisterProperties();
402
403  // FIXME: These should be based on subtarget info. Plus, the values should
404  // be smaller when we are in optimizing for size mode.
405  maxStoresPerMemset = 16; // For %llvm.memset -> sequence of stores
406  maxStoresPerMemcpy = 16; // For %llvm.memcpy -> sequence of stores
407  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
408  allowUnalignedMemoryAccesses = true; // x86 supports it!
409}
410
411//===----------------------------------------------------------------------===//
412//                    C Calling Convention implementation
413//===----------------------------------------------------------------------===//
414
415/// AddLiveIn - This helper function adds the specified physical register to the
416/// MachineFunction as a live in value.  It also creates a corresponding virtual
417/// register for it.
418static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
419                          TargetRegisterClass *RC) {
420  assert(RC->contains(PReg) && "Not the correct regclass!");
421  unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
422  MF.addLiveIn(PReg, VReg);
423  return VReg;
424}
425
426/// HowToPassCCCArgument - Returns how an formal argument of the specified type
427/// should be passed. If it is through stack, returns the size of the stack
428/// slot; if it is through XMM register, returns the number of XMM registers
429/// are needed.
430static void
431HowToPassCCCArgument(MVT::ValueType ObjectVT, unsigned NumXMMRegs,
432                     unsigned &ObjSize, unsigned &ObjXMMRegs) {
433  ObjXMMRegs = 0;
434
435  switch (ObjectVT) {
436  default: assert(0 && "Unhandled argument type!");
437  case MVT::i8:  ObjSize = 1; break;
438  case MVT::i16: ObjSize = 2; break;
439  case MVT::i32: ObjSize = 4; break;
440  case MVT::i64: ObjSize = 8; break;
441  case MVT::f32: ObjSize = 4; break;
442  case MVT::f64: ObjSize = 8; break;
443  case MVT::v16i8:
444  case MVT::v8i16:
445  case MVT::v4i32:
446  case MVT::v2i64:
447  case MVT::v4f32:
448  case MVT::v2f64:
449    if (NumXMMRegs < 4)
450      ObjXMMRegs = 1;
451    else
452      ObjSize = 16;
453    break;
454  }
455}
456
457SDOperand X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG) {
458  unsigned NumArgs = Op.Val->getNumValues() - 1;
459  MachineFunction &MF = DAG.getMachineFunction();
460  MachineFrameInfo *MFI = MF.getFrameInfo();
461  SDOperand Root = Op.getOperand(0);
462  std::vector<SDOperand> ArgValues;
463
464  // Add DAG nodes to load the arguments...  On entry to a function on the X86,
465  // the stack frame looks like this:
466  //
467  // [ESP] -- return address
468  // [ESP + 4] -- first argument (leftmost lexically)
469  // [ESP + 8] -- second argument, if first argument is <= 4 bytes in size
470  //    ...
471  //
472  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
473  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.
474  static const unsigned XMMArgRegs[] = {
475    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
476  };
477  for (unsigned i = 0; i < NumArgs; ++i) {
478    MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
479    unsigned ArgIncrement = 4;
480    unsigned ObjSize = 0;
481    unsigned ObjXMMRegs = 0;
482    HowToPassCCCArgument(ObjectVT, NumXMMRegs, ObjSize, ObjXMMRegs);
483    if (ObjSize > 4)
484      ArgIncrement = ObjSize;
485
486    SDOperand ArgValue;
487    if (ObjXMMRegs) {
488      // Passed in a XMM register.
489      unsigned Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
490                               X86::VR128RegisterClass);
491      ArgValue= DAG.getCopyFromReg(Root, Reg, ObjectVT);
492      ArgValues.push_back(ArgValue);
493      NumXMMRegs += ObjXMMRegs;
494    } else {
495      // XMM arguments have to be aligned on 16-byte boundary.
496      if (ObjSize == 16)
497        ArgOffset = ((ArgOffset + 15) / 16) * 16;
498      // Create the frame index object for this incoming parameter...
499      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
500      SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
501      ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0);
502      ArgValues.push_back(ArgValue);
503      ArgOffset += ArgIncrement;   // Move on to the next argument...
504    }
505  }
506
507  ArgValues.push_back(Root);
508
509  // If the function takes variable number of arguments, make a frame index for
510  // the start of the first vararg value... for expansion of llvm.va_start.
511  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
512  if (isVarArg)
513    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
514  RegSaveFrameIndex = 0xAAAAAAA;  // X86-64 only.
515  ReturnAddrIndex = 0;            // No return address slot generated yet.
516  BytesToPopOnReturn = 0;         // Callee pops nothing.
517  BytesCallerReserves = ArgOffset;
518
519  // If this is a struct return on Darwin/X86, the callee pops the hidden struct
520  // pointer.
521  if (MF.getFunction()->getCallingConv() == CallingConv::CSRet &&
522      Subtarget->isTargetDarwin())
523    BytesToPopOnReturn = 4;
524
525  // Return the new list of results.
526  std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
527                                     Op.Val->value_end());
528  return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size());
529}
530
531
532SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG) {
533  SDOperand Chain     = Op.getOperand(0);
534  unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();
535  bool isVarArg       = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
536  bool isTailCall     = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
537  SDOperand Callee    = Op.getOperand(4);
538  MVT::ValueType RetVT= Op.Val->getValueType(0);
539  unsigned NumOps     = (Op.getNumOperands() - 5) / 2;
540
541  // Keep track of the number of XMM regs passed so far.
542  unsigned NumXMMRegs = 0;
543  static const unsigned XMMArgRegs[] = {
544    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
545  };
546
547  // Count how many bytes are to be pushed on the stack.
548  unsigned NumBytes = 0;
549  for (unsigned i = 0; i != NumOps; ++i) {
550    SDOperand Arg = Op.getOperand(5+2*i);
551
552    switch (Arg.getValueType()) {
553    default: assert(0 && "Unexpected ValueType for argument!");
554    case MVT::i8:
555    case MVT::i16:
556    case MVT::i32:
557    case MVT::f32:
558      NumBytes += 4;
559      break;
560    case MVT::i64:
561    case MVT::f64:
562      NumBytes += 8;
563      break;
564    case MVT::v16i8:
565    case MVT::v8i16:
566    case MVT::v4i32:
567    case MVT::v2i64:
568    case MVT::v4f32:
569    case MVT::v2f64:
570      if (NumXMMRegs < 4)
571        ++NumXMMRegs;
572      else {
573        // XMM arguments have to be aligned on 16-byte boundary.
574        NumBytes = ((NumBytes + 15) / 16) * 16;
575        NumBytes += 16;
576      }
577      break;
578    }
579  }
580
581  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
582
583  // Arguments go on the stack in reverse order, as specified by the ABI.
584  unsigned ArgOffset = 0;
585  NumXMMRegs = 0;
586  std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
587  std::vector<SDOperand> MemOpChains;
588  SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
589  for (unsigned i = 0; i != NumOps; ++i) {
590    SDOperand Arg = Op.getOperand(5+2*i);
591
592    switch (Arg.getValueType()) {
593    default: assert(0 && "Unexpected ValueType for argument!");
594    case MVT::i8:
595    case MVT::i16: {
596      // Promote the integer to 32 bits.  If the input type is signed use a
597      // sign extend, otherwise use a zero extend.
598      unsigned ExtOp =
599        dyn_cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue() ?
600        ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
601      Arg = DAG.getNode(ExtOp, MVT::i32, Arg);
602    }
603    // Fallthrough
604
605    case MVT::i32:
606    case MVT::f32: {
607      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
608      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
609      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
610      ArgOffset += 4;
611      break;
612    }
613    case MVT::i64:
614    case MVT::f64: {
615      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
616      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
617      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
618      ArgOffset += 8;
619      break;
620    }
621    case MVT::v16i8:
622    case MVT::v8i16:
623    case MVT::v4i32:
624    case MVT::v2i64:
625    case MVT::v4f32:
626    case MVT::v2f64:
627      if (NumXMMRegs < 4) {
628        RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
629        NumXMMRegs++;
630      } else {
631        // XMM arguments have to be aligned on 16-byte boundary.
632        ArgOffset = ((ArgOffset + 15) / 16) * 16;
633        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
634        PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
635        MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
636        ArgOffset += 16;
637      }
638    }
639  }
640
641  if (!MemOpChains.empty())
642    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
643                        &MemOpChains[0], MemOpChains.size());
644
645  // Build a sequence of copy-to-reg nodes chained together with token chain
646  // and flag operands which copy the outgoing args into registers.
647  SDOperand InFlag;
648  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
649    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
650                             InFlag);
651    InFlag = Chain.getValue(1);
652  }
653
654  // If the callee is a GlobalAddress node (quite common, every direct call is)
655  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
656  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
657    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
658  else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
659    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
660
661  std::vector<MVT::ValueType> NodeTys;
662  NodeTys.push_back(MVT::Other);   // Returns a chain
663  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
664  std::vector<SDOperand> Ops;
665  Ops.push_back(Chain);
666  Ops.push_back(Callee);
667
668  // Add argument registers to the end of the list so that they are known live
669  // into the call.
670  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
671    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
672                                  RegsToPass[i].second.getValueType()));
673
674  if (InFlag.Val)
675    Ops.push_back(InFlag);
676
677  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
678                      NodeTys, &Ops[0], Ops.size());
679  InFlag = Chain.getValue(1);
680
681  // Create the CALLSEQ_END node.
682  unsigned NumBytesForCalleeToPush = 0;
683
684  // If this is is a call to a struct-return function on Darwin/X86, the callee
685  // pops the hidden struct pointer, so we have to push it back.
686  if (CallingConv == CallingConv::CSRet && Subtarget->isTargetDarwin())
687    NumBytesForCalleeToPush = 4;
688
689  NodeTys.clear();
690  NodeTys.push_back(MVT::Other);   // Returns a chain
691  if (RetVT != MVT::Other)
692    NodeTys.push_back(MVT::Flag);  // Returns a flag for retval copy to use.
693  Ops.clear();
694  Ops.push_back(Chain);
695  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
696  Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy()));
697  Ops.push_back(InFlag);
698  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
699  if (RetVT != MVT::Other)
700    InFlag = Chain.getValue(1);
701
702  std::vector<SDOperand> ResultVals;
703  NodeTys.clear();
704  switch (RetVT) {
705  default: assert(0 && "Unknown value type to return!");
706  case MVT::Other: break;
707  case MVT::i8:
708    Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1);
709    ResultVals.push_back(Chain.getValue(0));
710    NodeTys.push_back(MVT::i8);
711    break;
712  case MVT::i16:
713    Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1);
714    ResultVals.push_back(Chain.getValue(0));
715    NodeTys.push_back(MVT::i16);
716    break;
717  case MVT::i32:
718    if (Op.Val->getValueType(1) == MVT::i32) {
719      Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
720      ResultVals.push_back(Chain.getValue(0));
721      Chain = DAG.getCopyFromReg(Chain, X86::EDX, MVT::i32,
722                                 Chain.getValue(2)).getValue(1);
723      ResultVals.push_back(Chain.getValue(0));
724      NodeTys.push_back(MVT::i32);
725    } else {
726      Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
727      ResultVals.push_back(Chain.getValue(0));
728    }
729    NodeTys.push_back(MVT::i32);
730    break;
731  case MVT::v16i8:
732  case MVT::v8i16:
733  case MVT::v4i32:
734  case MVT::v2i64:
735  case MVT::v4f32:
736  case MVT::v2f64:
737    Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1);
738    ResultVals.push_back(Chain.getValue(0));
739    NodeTys.push_back(RetVT);
740    break;
741  case MVT::f32:
742  case MVT::f64: {
743    std::vector<MVT::ValueType> Tys;
744    Tys.push_back(MVT::f64);
745    Tys.push_back(MVT::Other);
746    Tys.push_back(MVT::Flag);
747    std::vector<SDOperand> Ops;
748    Ops.push_back(Chain);
749    Ops.push_back(InFlag);
750    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys,
751                                   &Ops[0], Ops.size());
752    Chain  = RetVal.getValue(1);
753    InFlag = RetVal.getValue(2);
754    if (X86ScalarSSE) {
755      // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
756      // shouldn't be necessary except that RFP cannot be live across
757      // multiple blocks. When stackifier is fixed, they can be uncoupled.
758      MachineFunction &MF = DAG.getMachineFunction();
759      int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
760      SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
761      Tys.clear();
762      Tys.push_back(MVT::Other);
763      Ops.clear();
764      Ops.push_back(Chain);
765      Ops.push_back(RetVal);
766      Ops.push_back(StackSlot);
767      Ops.push_back(DAG.getValueType(RetVT));
768      Ops.push_back(InFlag);
769      Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
770      RetVal = DAG.getLoad(RetVT, Chain, StackSlot, NULL, 0);
771      Chain = RetVal.getValue(1);
772    }
773
774    if (RetVT == MVT::f32 && !X86ScalarSSE)
775      // FIXME: we would really like to remember that this FP_ROUND
776      // operation is okay to eliminate if we allow excess FP precision.
777      RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
778    ResultVals.push_back(RetVal);
779    NodeTys.push_back(RetVT);
780    break;
781  }
782  }
783
784  // If the function returns void, just return the chain.
785  if (ResultVals.empty())
786    return Chain;
787
788  // Otherwise, merge everything together with a MERGE_VALUES node.
789  NodeTys.push_back(MVT::Other);
790  ResultVals.push_back(Chain);
791  SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
792                              &ResultVals[0], ResultVals.size());
793  return Res.getValue(Op.ResNo);
794}
795
796
797//===----------------------------------------------------------------------===//
798//                 X86-64 C Calling Convention implementation
799//===----------------------------------------------------------------------===//
800
801/// HowToPassX86_64CCCArgument - Returns how an formal argument of the specified
802/// type should be passed. If it is through stack, returns the size of the stack
803/// slot; if it is through integer or XMM register, returns the number of
804/// integer or XMM registers are needed.
805static void
806HowToPassX86_64CCCArgument(MVT::ValueType ObjectVT,
807                           unsigned NumIntRegs, unsigned NumXMMRegs,
808                           unsigned &ObjSize, unsigned &ObjIntRegs,
809                           unsigned &ObjXMMRegs) {
810  ObjSize = 0;
811  ObjIntRegs = 0;
812  ObjXMMRegs = 0;
813
814  switch (ObjectVT) {
815  default: assert(0 && "Unhandled argument type!");
816  case MVT::i8:
817  case MVT::i16:
818  case MVT::i32:
819  case MVT::i64:
820    if (NumIntRegs < 6)
821      ObjIntRegs = 1;
822    else {
823      switch (ObjectVT) {
824      default: break;
825      case MVT::i8:  ObjSize = 1; break;
826      case MVT::i16: ObjSize = 2; break;
827      case MVT::i32: ObjSize = 4; break;
828      case MVT::i64: ObjSize = 8; break;
829      }
830    }
831    break;
832  case MVT::f32:
833  case MVT::f64:
834  case MVT::v16i8:
835  case MVT::v8i16:
836  case MVT::v4i32:
837  case MVT::v2i64:
838  case MVT::v4f32:
839  case MVT::v2f64:
840    if (NumXMMRegs < 8)
841      ObjXMMRegs = 1;
842    else {
843      switch (ObjectVT) {
844      default: break;
845      case MVT::f32:  ObjSize = 4; break;
846      case MVT::f64:  ObjSize = 8; break;
847      case MVT::v16i8:
848      case MVT::v8i16:
849      case MVT::v4i32:
850      case MVT::v2i64:
851      case MVT::v4f32:
852      case MVT::v2f64: ObjSize = 16; break;
853    }
854    break;
855  }
856  }
857}
858
859SDOperand
860X86TargetLowering::LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG) {
861  unsigned NumArgs = Op.Val->getNumValues() - 1;
862  MachineFunction &MF = DAG.getMachineFunction();
863  MachineFrameInfo *MFI = MF.getFrameInfo();
864  SDOperand Root = Op.getOperand(0);
865  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
866  std::vector<SDOperand> ArgValues;
867
868  // Add DAG nodes to load the arguments...  On entry to a function on the X86,
869  // the stack frame looks like this:
870  //
871  // [RSP] -- return address
872  // [RSP + 8] -- first nonreg argument (leftmost lexically)
873  // [RSP +16] -- second nonreg argument, if 1st argument is <= 8 bytes in size
874  //    ...
875  //
876  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
877  unsigned NumIntRegs = 0;  // Int regs used for parameter passing.
878  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.
879
880  static const unsigned GPR8ArgRegs[] = {
881    X86::DIL, X86::SIL, X86::DL,  X86::CL,  X86::R8B, X86::R9B
882  };
883  static const unsigned GPR16ArgRegs[] = {
884    X86::DI,  X86::SI,  X86::DX,  X86::CX,  X86::R8W, X86::R9W
885  };
886  static const unsigned GPR32ArgRegs[] = {
887    X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
888  };
889  static const unsigned GPR64ArgRegs[] = {
890    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8,  X86::R9
891  };
892  static const unsigned XMMArgRegs[] = {
893    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
894    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
895  };
896
897  for (unsigned i = 0; i < NumArgs; ++i) {
898    MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
899    unsigned ArgIncrement = 8;
900    unsigned ObjSize = 0;
901    unsigned ObjIntRegs = 0;
902    unsigned ObjXMMRegs = 0;
903
904    // FIXME: __int128 and long double support?
905    HowToPassX86_64CCCArgument(ObjectVT, NumIntRegs, NumXMMRegs,
906                               ObjSize, ObjIntRegs, ObjXMMRegs);
907    if (ObjSize > 8)
908      ArgIncrement = ObjSize;
909
910    unsigned Reg = 0;
911    SDOperand ArgValue;
912    if (ObjIntRegs || ObjXMMRegs) {
913      switch (ObjectVT) {
914      default: assert(0 && "Unhandled argument type!");
915      case MVT::i8:
916      case MVT::i16:
917      case MVT::i32:
918      case MVT::i64: {
919        TargetRegisterClass *RC = NULL;
920        switch (ObjectVT) {
921        default: break;
922        case MVT::i8:
923          RC = X86::GR8RegisterClass;
924          Reg = GPR8ArgRegs[NumIntRegs];
925          break;
926        case MVT::i16:
927          RC = X86::GR16RegisterClass;
928          Reg = GPR16ArgRegs[NumIntRegs];
929          break;
930        case MVT::i32:
931          RC = X86::GR32RegisterClass;
932          Reg = GPR32ArgRegs[NumIntRegs];
933          break;
934        case MVT::i64:
935          RC = X86::GR64RegisterClass;
936          Reg = GPR64ArgRegs[NumIntRegs];
937          break;
938        }
939        Reg = AddLiveIn(MF, Reg, RC);
940        ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
941        break;
942      }
943      case MVT::f32:
944      case MVT::f64:
945      case MVT::v16i8:
946      case MVT::v8i16:
947      case MVT::v4i32:
948      case MVT::v2i64:
949      case MVT::v4f32:
950      case MVT::v2f64: {
951        TargetRegisterClass *RC= (ObjectVT == MVT::f32) ?
952          X86::FR32RegisterClass : ((ObjectVT == MVT::f64) ?
953                              X86::FR64RegisterClass : X86::VR128RegisterClass);
954        Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], RC);
955        ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
956        break;
957      }
958      }
959      NumIntRegs += ObjIntRegs;
960      NumXMMRegs += ObjXMMRegs;
961    } else if (ObjSize) {
962      // XMM arguments have to be aligned on 16-byte boundary.
963      if (ObjSize == 16)
964        ArgOffset = ((ArgOffset + 15) / 16) * 16;
965      // Create the SelectionDAG nodes corresponding to a load from this
966      // parameter.
967      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
968      SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
969      ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0);
970      ArgOffset += ArgIncrement;   // Move on to the next argument.
971    }
972
973    ArgValues.push_back(ArgValue);
974  }
975
976  // If the function takes variable number of arguments, make a frame index for
977  // the start of the first vararg value... for expansion of llvm.va_start.
978  if (isVarArg) {
979    // For X86-64, if there are vararg parameters that are passed via
980    // registers, then we must store them to their spots on the stack so they
981    // may be loaded by deferencing the result of va_next.
982    VarArgsGPOffset = NumIntRegs * 8;
983    VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
984    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
985    RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);
986
987    // Store the integer parameter registers.
988    std::vector<SDOperand> MemOps;
989    SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
990    SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
991                              DAG.getConstant(VarArgsGPOffset, getPointerTy()));
992    for (; NumIntRegs != 6; ++NumIntRegs) {
993      unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
994                                X86::GR64RegisterClass);
995      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
996      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
997      MemOps.push_back(Store);
998      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
999                        DAG.getConstant(8, getPointerTy()));
1000    }
1001
1002    // Now store the XMM (fp + vector) parameter registers.
1003    FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
1004                      DAG.getConstant(VarArgsFPOffset, getPointerTy()));
1005    for (; NumXMMRegs != 8; ++NumXMMRegs) {
1006      unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
1007                                X86::VR128RegisterClass);
1008      SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
1009      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
1010      MemOps.push_back(Store);
1011      FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
1012                        DAG.getConstant(16, getPointerTy()));
1013    }
1014    if (!MemOps.empty())
1015        Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
1016                           &MemOps[0], MemOps.size());
1017  }
1018
1019  ArgValues.push_back(Root);
1020
1021  ReturnAddrIndex = 0;     // No return address slot generated yet.
1022  BytesToPopOnReturn = 0;  // Callee pops nothing.
1023  BytesCallerReserves = ArgOffset;
1024
1025  // Return the new list of results.
1026  std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
1027                                     Op.Val->value_end());
1028  return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size());
1029}
1030
1031SDOperand
1032X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG) {
1033  SDOperand Chain     = Op.getOperand(0);
1034  unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();
1035  bool isVarArg       = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
1036  bool isTailCall     = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
1037  SDOperand Callee    = Op.getOperand(4);
1038  MVT::ValueType RetVT= Op.Val->getValueType(0);
1039  unsigned NumOps     = (Op.getNumOperands() - 5) / 2;
1040
1041  // Count how many bytes are to be pushed on the stack.
1042  unsigned NumBytes = 0;
1043  unsigned NumIntRegs = 0;  // Int regs used for parameter passing.
1044  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.
1045
1046  static const unsigned GPR8ArgRegs[] = {
1047    X86::DIL, X86::SIL, X86::DL,  X86::CL,  X86::R8B, X86::R9B
1048  };
1049  static const unsigned GPR16ArgRegs[] = {
1050    X86::DI,  X86::SI,  X86::DX,  X86::CX,  X86::R8W, X86::R9W
1051  };
1052  static const unsigned GPR32ArgRegs[] = {
1053    X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
1054  };
1055  static const unsigned GPR64ArgRegs[] = {
1056    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8,  X86::R9
1057  };
1058  static const unsigned XMMArgRegs[] = {
1059    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
1060    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
1061  };
1062
1063  for (unsigned i = 0; i != NumOps; ++i) {
1064    SDOperand Arg = Op.getOperand(5+2*i);
1065    MVT::ValueType ArgVT = Arg.getValueType();
1066
1067    switch (ArgVT) {
1068    default: assert(0 && "Unknown value type!");
1069    case MVT::i8:
1070    case MVT::i16:
1071    case MVT::i32:
1072    case MVT::i64:
1073      if (NumIntRegs < 6)
1074        ++NumIntRegs;
1075      else
1076        NumBytes += 8;
1077      break;
1078    case MVT::f32:
1079    case MVT::f64:
1080    case MVT::v16i8:
1081    case MVT::v8i16:
1082    case MVT::v4i32:
1083    case MVT::v2i64:
1084    case MVT::v4f32:
1085    case MVT::v2f64:
1086      if (NumXMMRegs < 8)
1087        NumXMMRegs++;
1088      else if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
1089        NumBytes += 8;
1090      else {
1091        // XMM arguments have to be aligned on 16-byte boundary.
1092        NumBytes = ((NumBytes + 15) / 16) * 16;
1093        NumBytes += 16;
1094      }
1095      break;
1096    }
1097  }
1098
1099  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
1100
1101  // Arguments go on the stack in reverse order, as specified by the ABI.
1102  unsigned ArgOffset = 0;
1103  NumIntRegs = 0;
1104  NumXMMRegs = 0;
1105  std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
1106  std::vector<SDOperand> MemOpChains;
1107  SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
1108  for (unsigned i = 0; i != NumOps; ++i) {
1109    SDOperand Arg = Op.getOperand(5+2*i);
1110    MVT::ValueType ArgVT = Arg.getValueType();
1111
1112    switch (ArgVT) {
1113    default: assert(0 && "Unexpected ValueType for argument!");
1114    case MVT::i8:
1115    case MVT::i16:
1116    case MVT::i32:
1117    case MVT::i64:
1118      if (NumIntRegs < 6) {
1119        unsigned Reg = 0;
1120        switch (ArgVT) {
1121        default: break;
1122        case MVT::i8:  Reg = GPR8ArgRegs[NumIntRegs];  break;
1123        case MVT::i16: Reg = GPR16ArgRegs[NumIntRegs]; break;
1124        case MVT::i32: Reg = GPR32ArgRegs[NumIntRegs]; break;
1125        case MVT::i64: Reg = GPR64ArgRegs[NumIntRegs]; break;
1126        }
1127        RegsToPass.push_back(std::make_pair(Reg, Arg));
1128        ++NumIntRegs;
1129      } else {
1130        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
1131        PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
1132        MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
1133        ArgOffset += 8;
1134      }
1135      break;
1136    case MVT::f32:
1137    case MVT::f64:
1138    case MVT::v16i8:
1139    case MVT::v8i16:
1140    case MVT::v4i32:
1141    case MVT::v2i64:
1142    case MVT::v4f32:
1143    case MVT::v2f64:
1144      if (NumXMMRegs < 8) {
1145        RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
1146        NumXMMRegs++;
1147      } else {
1148        if (ArgVT != MVT::f32 && ArgVT != MVT::f64) {
1149          // XMM arguments have to be aligned on 16-byte boundary.
1150          ArgOffset = ((ArgOffset + 15) / 16) * 16;
1151        }
1152        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
1153        PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
1154        MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
1155        if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
1156          ArgOffset += 8;
1157        else
1158          ArgOffset += 16;
1159      }
1160    }
1161  }
1162
1163  if (!MemOpChains.empty())
1164    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
1165                        &MemOpChains[0], MemOpChains.size());
1166
1167  // Build a sequence of copy-to-reg nodes chained together with token chain
1168  // and flag operands which copy the outgoing args into registers.
1169  SDOperand InFlag;
1170  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1171    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
1172                             InFlag);
1173    InFlag = Chain.getValue(1);
1174  }
1175
1176  if (isVarArg) {
1177    // From AMD64 ABI document:
1178    // For calls that may call functions that use varargs or stdargs
1179    // (prototype-less calls or calls to functions containing ellipsis (...) in
1180    // the declaration) %al is used as hidden argument to specify the number
1181    // of SSE registers used. The contents of %al do not need to match exactly
1182    // the number of registers, but must be an ubound on the number of SSE
1183    // registers used and is in the range 0 - 8 inclusive.
1184    Chain = DAG.getCopyToReg(Chain, X86::AL,
1185                             DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
1186    InFlag = Chain.getValue(1);
1187  }
1188
1189  // If the callee is a GlobalAddress node (quite common, every direct call is)
1190  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1191  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1192    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
1193  else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
1194    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
1195
1196  std::vector<MVT::ValueType> NodeTys;
1197  NodeTys.push_back(MVT::Other);   // Returns a chain
1198  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
1199  std::vector<SDOperand> Ops;
1200  Ops.push_back(Chain);
1201  Ops.push_back(Callee);
1202
1203  // Add argument registers to the end of the list so that they are known live
1204  // into the call.
1205  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1206    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1207                                  RegsToPass[i].second.getValueType()));
1208
1209  if (InFlag.Val)
1210    Ops.push_back(InFlag);
1211
1212  // FIXME: Do not generate X86ISD::TAILCALL for now.
1213  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
1214                      NodeTys, &Ops[0], Ops.size());
1215  InFlag = Chain.getValue(1);
1216
1217  NodeTys.clear();
1218  NodeTys.push_back(MVT::Other);   // Returns a chain
1219  if (RetVT != MVT::Other)
1220    NodeTys.push_back(MVT::Flag);  // Returns a flag for retval copy to use.
1221  Ops.clear();
1222  Ops.push_back(Chain);
1223  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
1224  Ops.push_back(DAG.getConstant(0, getPointerTy()));
1225  Ops.push_back(InFlag);
1226  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
1227  if (RetVT != MVT::Other)
1228    InFlag = Chain.getValue(1);
1229
1230  std::vector<SDOperand> ResultVals;
1231  NodeTys.clear();
1232  switch (RetVT) {
1233  default: assert(0 && "Unknown value type to return!");
1234  case MVT::Other: break;
1235  case MVT::i8:
1236    Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1);
1237    ResultVals.push_back(Chain.getValue(0));
1238    NodeTys.push_back(MVT::i8);
1239    break;
1240  case MVT::i16:
1241    Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1);
1242    ResultVals.push_back(Chain.getValue(0));
1243    NodeTys.push_back(MVT::i16);
1244    break;
1245  case MVT::i32:
1246    Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
1247    ResultVals.push_back(Chain.getValue(0));
1248    NodeTys.push_back(MVT::i32);
1249    break;
1250  case MVT::i64:
1251    if (Op.Val->getValueType(1) == MVT::i64) {
1252      // FIXME: __int128 support?
1253      Chain = DAG.getCopyFromReg(Chain, X86::RAX, MVT::i64, InFlag).getValue(1);
1254      ResultVals.push_back(Chain.getValue(0));
1255      Chain = DAG.getCopyFromReg(Chain, X86::RDX, MVT::i64,
1256                                 Chain.getValue(2)).getValue(1);
1257      ResultVals.push_back(Chain.getValue(0));
1258      NodeTys.push_back(MVT::i64);
1259    } else {
1260      Chain = DAG.getCopyFromReg(Chain, X86::RAX, MVT::i64, InFlag).getValue(1);
1261      ResultVals.push_back(Chain.getValue(0));
1262    }
1263    NodeTys.push_back(MVT::i64);
1264    break;
1265  case MVT::f32:
1266  case MVT::f64:
1267  case MVT::v16i8:
1268  case MVT::v8i16:
1269  case MVT::v4i32:
1270  case MVT::v2i64:
1271  case MVT::v4f32:
1272  case MVT::v2f64:
1273    // FIXME: long double support?
1274    Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1);
1275    ResultVals.push_back(Chain.getValue(0));
1276    NodeTys.push_back(RetVT);
1277    break;
1278  }
1279
1280  // If the function returns void, just return the chain.
1281  if (ResultVals.empty())
1282    return Chain;
1283
1284  // Otherwise, merge everything together with a MERGE_VALUES node.
1285  NodeTys.push_back(MVT::Other);
1286  ResultVals.push_back(Chain);
1287  SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
1288                              &ResultVals[0], ResultVals.size());
1289  return Res.getValue(Op.ResNo);
1290}
1291
1292//===----------------------------------------------------------------------===//
1293//                    Fast Calling Convention implementation
1294//===----------------------------------------------------------------------===//
1295//
1296// The X86 'fast' calling convention passes up to two integer arguments in
1297// registers (an appropriate portion of EAX/EDX), passes arguments in C order,
1298// and requires that the callee pop its arguments off the stack (allowing proper
1299// tail calls), and has the same return value conventions as C calling convs.
1300//
1301// This calling convention always arranges for the callee pop value to be 8n+4
1302// bytes, which is needed for tail recursion elimination and stack alignment
1303// reasons.
1304//
1305// Note that this can be enhanced in the future to pass fp vals in registers
1306// (when we have a global fp allocator) and do other tricks.
1307//
1308
1309/// HowToPassFastCCArgument - Returns how an formal argument of the specified
1310/// type should be passed. If it is through stack, returns the size of the stack
1311/// slot; if it is through integer or XMM register, returns the number of
1312/// integer or XMM registers are needed.
1313static void
1314HowToPassFastCCArgument(MVT::ValueType ObjectVT,
1315                        unsigned NumIntRegs, unsigned NumXMMRegs,
1316                        unsigned &ObjSize, unsigned &ObjIntRegs,
1317                        unsigned &ObjXMMRegs) {
1318  ObjSize = 0;
1319  ObjIntRegs = 0;
1320  ObjXMMRegs = 0;
1321
1322  switch (ObjectVT) {
1323  default: assert(0 && "Unhandled argument type!");
1324  case MVT::i8:
1325#if FASTCC_NUM_INT_ARGS_INREGS > 0
1326    if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
1327      ObjIntRegs = 1;
1328    else
1329#endif
1330      ObjSize = 1;
1331    break;
1332  case MVT::i16:
1333#if FASTCC_NUM_INT_ARGS_INREGS > 0
1334    if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
1335      ObjIntRegs = 1;
1336    else
1337#endif
1338      ObjSize = 2;
1339    break;
1340  case MVT::i32:
1341#if FASTCC_NUM_INT_ARGS_INREGS > 0
1342    if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
1343      ObjIntRegs = 1;
1344    else
1345#endif
1346      ObjSize = 4;
1347    break;
1348  case MVT::i64:
1349#if FASTCC_NUM_INT_ARGS_INREGS > 0
1350    if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
1351      ObjIntRegs = 2;
1352    } else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
1353      ObjIntRegs = 1;
1354      ObjSize = 4;
1355    } else
1356#endif
      ObjSize = 8;
    break;
1358  case MVT::f32:
1359    ObjSize = 4;
1360    break;
1361  case MVT::f64:
1362    ObjSize = 8;
1363    break;
1364  case MVT::v16i8:
1365  case MVT::v8i16:
1366  case MVT::v4i32:
1367  case MVT::v2i64:
1368  case MVT::v4f32:
1369  case MVT::v2f64:
1370    if (NumXMMRegs < 4)
1371      ObjXMMRegs = 1;
1372    else
1373      ObjSize = 16;
1374    break;
1375  }
1376}
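
// For instance, with FASTCC_NUM_INT_ARGS_INREGS == 2 and NumIntRegs == 1, an
// MVT::i64 argument gets ObjIntRegs = 1 and ObjSize = 4: its low half rides
// in the remaining integer register and its high half spills to a 4-byte
// stack slot.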
1377
1378SDOperand
1379X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) {
1380  unsigned NumArgs = Op.Val->getNumValues()-1;
1381  MachineFunction &MF = DAG.getMachineFunction();
1382  MachineFrameInfo *MFI = MF.getFrameInfo();
1383  SDOperand Root = Op.getOperand(0);
1384  std::vector<SDOperand> ArgValues;
1385
1386  // Add DAG nodes to load the arguments...  On entry to a function the stack
1387  // frame looks like this:
1388  //
1389  // [ESP] -- return address
1390  // [ESP + 4] -- first nonreg argument (leftmost lexically)
1391  // [ESP + 8] -- second nonreg argument, if 1st argument is <= 4 bytes in size
1392  //    ...
1393  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
1394
  // Keep track of the number of integer regs passed so far.  This can be
  // either 0 (neither EAX nor EDX is used), 1 (EAX is used), or 2 (both EAX
  // and EDX are used).
1398  unsigned NumIntRegs = 0;
1399  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.
1400
1401  static const unsigned XMMArgRegs[] = {
1402    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
1403  };
1404
1405  for (unsigned i = 0; i < NumArgs; ++i) {
1406    MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
1407    unsigned ArgIncrement = 4;
1408    unsigned ObjSize = 0;
1409    unsigned ObjIntRegs = 0;
1410    unsigned ObjXMMRegs = 0;
1411
1412    HowToPassFastCCArgument(ObjectVT, NumIntRegs, NumXMMRegs,
1413                            ObjSize, ObjIntRegs, ObjXMMRegs);
1414    if (ObjSize > 4)
1415      ArgIncrement = ObjSize;
1416
1417    unsigned Reg = 0;
1418    SDOperand ArgValue;
1419    if (ObjIntRegs || ObjXMMRegs) {
1420      switch (ObjectVT) {
1421      default: assert(0 && "Unhandled argument type!");
1422      case MVT::i8:
1423        Reg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
1424                        X86::GR8RegisterClass);
1425        ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i8);
1426        break;
1427      case MVT::i16:
1428        Reg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
1429                        X86::GR16RegisterClass);
1430        ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i16);
1431        break;
1432      case MVT::i32:
1433        Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
1434                        X86::GR32RegisterClass);
1435        ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32);
1436        break;
1437      case MVT::i64:
1438        Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
1439                        X86::GR32RegisterClass);
1440        ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32);
1441        if (ObjIntRegs == 2) {
1442          Reg = AddLiveIn(MF, X86::EDX, X86::GR32RegisterClass);
1443          SDOperand ArgValue2 = DAG.getCopyFromReg(Root, Reg, MVT::i32);
1444          ArgValue= DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
1445        }
1446        break;
1447      case MVT::v16i8:
1448      case MVT::v8i16:
1449      case MVT::v4i32:
1450      case MVT::v2i64:
1451      case MVT::v4f32:
1452      case MVT::v2f64:
1453        Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], X86::VR128RegisterClass);
1454        ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
1455        break;
1456      }
1457      NumIntRegs += ObjIntRegs;
1458      NumXMMRegs += ObjXMMRegs;
1459    }
1460
1461    if (ObjSize) {
1462      // XMM arguments have to be aligned on 16-byte boundary.
1463      if (ObjSize == 16)
1464        ArgOffset = ((ArgOffset + 15) / 16) * 16;
1465      // Create the SelectionDAG nodes corresponding to a load from this
1466      // parameter.
1467      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
1468      SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
1469      if (ObjectVT == MVT::i64 && ObjIntRegs) {
1470        SDOperand ArgValue2 = DAG.getLoad(Op.Val->getValueType(i), Root, FIN,
1471                                          NULL, 0);
1472        ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
1473      } else
1474        ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0);
1475      ArgOffset += ArgIncrement;   // Move on to the next argument.
1476    }
1477
1478    ArgValues.push_back(ArgValue);
1479  }
1480
1481  ArgValues.push_back(Root);
1482
  // Make sure the callee pops 8n+4 bytes so that, once the return address has
  // been pushed, the start of the argument area stays 8-byte aligned.
1485  if ((ArgOffset & 7) == 0)
1486    ArgOffset += 4;
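  // (ArgOffset is always a multiple of four at this point, so bumping exact
  // multiples of eight by four leaves every value of the form 8n+4.)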
1487
1488  VarArgsFrameIndex = 0xAAAAAAA;   // fastcc functions can't have varargs.
1489  RegSaveFrameIndex = 0xAAAAAAA;   // X86-64 only.
1490  ReturnAddrIndex = 0;             // No return address slot generated yet.
1491  BytesToPopOnReturn = ArgOffset;  // Callee pops all stack arguments.
1492  BytesCallerReserves = 0;
1493
1494  // Finally, inform the code generator which regs we return values in.
1495  switch (getValueType(MF.getFunction()->getReturnType())) {
1496  default: assert(0 && "Unknown type!");
1497  case MVT::isVoid: break;
1498  case MVT::i1:
1499  case MVT::i8:
1500  case MVT::i16:
1501  case MVT::i32:
1502    MF.addLiveOut(X86::EAX);
1503    break;
1504  case MVT::i64:
1505    MF.addLiveOut(X86::EAX);
1506    MF.addLiveOut(X86::EDX);
1507    break;
1508  case MVT::f32:
1509  case MVT::f64:
1510    MF.addLiveOut(X86::ST0);
1511    break;
1512  case MVT::v16i8:
1513  case MVT::v8i16:
1514  case MVT::v4i32:
1515  case MVT::v2i64:
1516  case MVT::v4f32:
1517  case MVT::v2f64:
1518    MF.addLiveOut(X86::XMM0);
1519    break;
1520  }
1521
1522  // Return the new list of results.
1523  std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
1524                                     Op.Val->value_end());
1525  return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size());
1526}
1527
1528SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG,
1529                                               bool isFastCall) {
1530  SDOperand Chain     = Op.getOperand(0);
1531  unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();
1532  bool isVarArg       = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
1533  bool isTailCall     = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
1534  SDOperand Callee    = Op.getOperand(4);
1535  MVT::ValueType RetVT= Op.Val->getValueType(0);
1536  unsigned NumOps     = (Op.getNumOperands() - 5) / 2;
1537
1538  // Count how many bytes are to be pushed on the stack.
1539  unsigned NumBytes = 0;
1540
  // Keep track of the number of integer regs passed so far.  This can be
  // either 0 (neither EAX nor EDX is used), 1 (EAX is used), or 2 (both EAX
  // and EDX are used).
1544  unsigned NumIntRegs = 0;
1545  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.
1546
1547  static const unsigned GPRArgRegs[][2] = {
1548    { X86::AL,  X86::DL },
1549    { X86::AX,  X86::DX },
1550    { X86::EAX, X86::EDX }
1551  };
1552  static const unsigned FastCallGPRArgRegs[][2] = {
1553    { X86::CL,  X86::DL },
1554    { X86::CX,  X86::DX },
1555    { X86::ECX, X86::EDX }
1556  };
1557  static const unsigned XMMArgRegs[] = {
1558    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
1559  };
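  // Note the GPR table layout: the row (ValueType - MVT::i8) selects the 8-,
  // 16-, or 32-bit name of the register and the column selects the first or
  // second integer argument register; this indexing is used below.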
1560
1561  for (unsigned i = 0; i != NumOps; ++i) {
1562    SDOperand Arg = Op.getOperand(5+2*i);
1563
1564    switch (Arg.getValueType()) {
1565    default: assert(0 && "Unknown value type!");
1566    case MVT::i8:
1567    case MVT::i16:
1568    case MVT::i32: {
1569     unsigned MaxNumIntRegs = (isFastCall ? 2 : FASTCC_NUM_INT_ARGS_INREGS);
1570     if (NumIntRegs < MaxNumIntRegs) {
1571       ++NumIntRegs;
1572       break;
1573     }
1574     } // Fall through
1575    case MVT::f32:
1576      NumBytes += 4;
1577      break;
1578    case MVT::f64:
1579      NumBytes += 8;
1580      break;
1581    case MVT::v16i8:
1582    case MVT::v8i16:
1583    case MVT::v4i32:
1584    case MVT::v2i64:
1585    case MVT::v4f32:
1586    case MVT::v2f64:
1587     if (isFastCall) {
1588      assert(0 && "Unknown value type!");
1589     } else {
1590       if (NumXMMRegs < 4)
1591         NumXMMRegs++;
1592       else {
1593         // XMM arguments have to be aligned on 16-byte boundary.
1594         NumBytes = ((NumBytes + 15) / 16) * 16;
1595         NumBytes += 16;
1596       }
1597     }
1598     break;
1599    }
1600  }
1601
  // Make sure the call pushes 8n+4 bytes so that, once the return address has
  // been pushed, the start of the argument area stays 8-byte aligned.
1604  if ((NumBytes & 7) == 0)
1605    NumBytes += 4;
1606
1607  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
1608
1609  // Arguments go on the stack in reverse order, as specified by the ABI.
1610  unsigned ArgOffset = 0;
1611  NumIntRegs = 0;
1612  std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
1613  std::vector<SDOperand> MemOpChains;
1614  SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
1615  for (unsigned i = 0; i != NumOps; ++i) {
1616    SDOperand Arg = Op.getOperand(5+2*i);
1617
1618    switch (Arg.getValueType()) {
1619    default: assert(0 && "Unexpected ValueType for argument!");
1620    case MVT::i8:
1621    case MVT::i16:
1622    case MVT::i32: {
1623     unsigned MaxNumIntRegs = (isFastCall ? 2 : FASTCC_NUM_INT_ARGS_INREGS);
1624     if (NumIntRegs < MaxNumIntRegs) {
1625       RegsToPass.push_back(
1626         std::make_pair(GPRArgRegs[Arg.getValueType()-MVT::i8][NumIntRegs],
1627                        Arg));
1628       ++NumIntRegs;
1629       break;
1630     }
1631     } // Fall through
1632    case MVT::f32: {
1633      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
1634      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
1635      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
1636      ArgOffset += 4;
1637      break;
1638    }
1639    case MVT::f64: {
1640      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
1641      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
1642      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
1643      ArgOffset += 8;
1644      break;
1645    }
1646    case MVT::v16i8:
1647    case MVT::v8i16:
1648    case MVT::v4i32:
1649    case MVT::v2i64:
1650    case MVT::v4f32:
1651    case MVT::v2f64:
1652     if (isFastCall) {
1653       assert(0 && "Unexpected ValueType for argument!");
1654     } else {
1655       if (NumXMMRegs < 4) {
1656         RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
1657         NumXMMRegs++;
1658       } else {
1659         // XMM arguments have to be aligned on 16-byte boundary.
1660         ArgOffset = ((ArgOffset + 15) / 16) * 16;
1661         SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
1662         PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
1663         MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
1664         ArgOffset += 16;
1665       }
1666     }
1667     break;
1668    }
1669  }
1670
1671  if (!MemOpChains.empty())
1672    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
1673                        &MemOpChains[0], MemOpChains.size());
1674
1675  // Build a sequence of copy-to-reg nodes chained together with token chain
1676  // and flag operands which copy the outgoing args into registers.
1677  SDOperand InFlag;
1678  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1679    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
1680                             InFlag);
1681    InFlag = Chain.getValue(1);
1682  }
1683
1684  // If the callee is a GlobalAddress node (quite common, every direct call is)
1685  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1686  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1687    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
1688  else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
1689    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
1690
1691  std::vector<MVT::ValueType> NodeTys;
1692  NodeTys.push_back(MVT::Other);   // Returns a chain
1693  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
1694  std::vector<SDOperand> Ops;
1695  Ops.push_back(Chain);
1696  Ops.push_back(Callee);
1697
1698  // Add argument registers to the end of the list so that they are known live
1699  // into the call.
1700  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1701    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1702                                  RegsToPass[i].second.getValueType()));
1703
1704  if (InFlag.Val)
1705    Ops.push_back(InFlag);
1706
1707  // FIXME: Do not generate X86ISD::TAILCALL for now.
1708  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
1709                      NodeTys, &Ops[0], Ops.size());
1710  InFlag = Chain.getValue(1);
1711
1712  NodeTys.clear();
1713  NodeTys.push_back(MVT::Other);   // Returns a chain
1714  if (RetVT != MVT::Other)
1715    NodeTys.push_back(MVT::Flag);  // Returns a flag for retval copy to use.
1716  Ops.clear();
1717  Ops.push_back(Chain);
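  // CALLSEQ_END takes the number of bytes the call sequence reserved and the
  // number of bytes the callee pops on return; a fastcc callee pops them all.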
1718  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
1719  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
1720  Ops.push_back(InFlag);
1721  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
1722  if (RetVT != MVT::Other)
1723    InFlag = Chain.getValue(1);
1724
1725  std::vector<SDOperand> ResultVals;
1726  NodeTys.clear();
1727  switch (RetVT) {
1728  default: assert(0 && "Unknown value type to return!");
1729  case MVT::Other: break;
1730  case MVT::i8:
1731    Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1);
1732    ResultVals.push_back(Chain.getValue(0));
1733    NodeTys.push_back(MVT::i8);
1734    break;
1735  case MVT::i16:
1736    Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1);
1737    ResultVals.push_back(Chain.getValue(0));
1738    NodeTys.push_back(MVT::i16);
1739    break;
1740  case MVT::i32:
1741    if (Op.Val->getValueType(1) == MVT::i32) {
1742      Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
1743      ResultVals.push_back(Chain.getValue(0));
1744      Chain = DAG.getCopyFromReg(Chain, X86::EDX, MVT::i32,
1745                                 Chain.getValue(2)).getValue(1);
1746      ResultVals.push_back(Chain.getValue(0));
1747      NodeTys.push_back(MVT::i32);
1748    } else {
1749      Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
1750      ResultVals.push_back(Chain.getValue(0));
1751    }
1752    NodeTys.push_back(MVT::i32);
1753    break;
1754  case MVT::v16i8:
1755  case MVT::v8i16:
1756  case MVT::v4i32:
1757  case MVT::v2i64:
1758  case MVT::v4f32:
1759  case MVT::v2f64:
1760   if (isFastCall) {
1761     assert(0 && "Unknown value type to return!");
1762   } else {
1763     Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1);
1764     ResultVals.push_back(Chain.getValue(0));
1765     NodeTys.push_back(RetVT);
1766   }
1767   break;
1768  case MVT::f32:
1769  case MVT::f64: {
1770    std::vector<MVT::ValueType> Tys;
1771    Tys.push_back(MVT::f64);
1772    Tys.push_back(MVT::Other);
1773    Tys.push_back(MVT::Flag);
1774    std::vector<SDOperand> Ops;
1775    Ops.push_back(Chain);
1776    Ops.push_back(InFlag);
1777    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys,
1778                                   &Ops[0], Ops.size());
1779    Chain  = RetVal.getValue(1);
1780    InFlag = RetVal.getValue(2);
1781    if (X86ScalarSSE) {
1782      // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
1783      // shouldn't be necessary except that RFP cannot be live across
1784      // multiple blocks. When stackifier is fixed, they can be uncoupled.
1785      MachineFunction &MF = DAG.getMachineFunction();
1786      int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
1787      SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1788      Tys.clear();
1789      Tys.push_back(MVT::Other);
1790      Ops.clear();
1791      Ops.push_back(Chain);
1792      Ops.push_back(RetVal);
1793      Ops.push_back(StackSlot);
1794      Ops.push_back(DAG.getValueType(RetVT));
1795      Ops.push_back(InFlag);
1796      Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
1797      RetVal = DAG.getLoad(RetVT, Chain, StackSlot, NULL, 0);
1798      Chain = RetVal.getValue(1);
1799    }
1800
1801    if (RetVT == MVT::f32 && !X86ScalarSSE)
1802      // FIXME: we would really like to remember that this FP_ROUND
1803      // operation is okay to eliminate if we allow excess FP precision.
1804      RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
1805    ResultVals.push_back(RetVal);
1806    NodeTys.push_back(RetVT);
1807    break;
1808  }
1809  }
1810
1811
1812  // If the function returns void, just return the chain.
1813  if (ResultVals.empty())
1814    return Chain;
1815
1816  // Otherwise, merge everything together with a MERGE_VALUES node.
1817  NodeTys.push_back(MVT::Other);
1818  ResultVals.push_back(Chain);
1819  SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
1820                              &ResultVals[0], ResultVals.size());
1821  return Res.getValue(Op.ResNo);
1822}
1823
1824//===----------------------------------------------------------------------===//
1825//                  StdCall Calling Convention implementation
1826//===----------------------------------------------------------------------===//
//  The StdCall calling convention is the standard for most Windows API
//  routines. It differs from the C calling convention in one key way: the
//  callee, not the caller, cleans up the stack. Symbols are also decorated
//  with a suffix giving the number of argument bytes. It does not support
//  any vector arguments.
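//
//  For example, a stdcall function "int __stdcall f(int, int, int)" is
//  typically emitted as "_f@12" and returns with "ret 12" to pop its twelve
//  argument bytes.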
1831
/// HowToPassStdCallCCArgument - Returns how a formal argument of the specified
/// type should be passed, as the size of its stack slot.
1834static void
1835HowToPassStdCallCCArgument(MVT::ValueType ObjectVT, unsigned &ObjSize) {
1836  switch (ObjectVT) {
1837  default: assert(0 && "Unhandled argument type!");
1838  case MVT::i8:  ObjSize = 1; break;
1839  case MVT::i16: ObjSize = 2; break;
1840  case MVT::i32: ObjSize = 4; break;
1841  case MVT::i64: ObjSize = 8; break;
1842  case MVT::f32: ObjSize = 4; break;
1843  case MVT::f64: ObjSize = 8; break;
1844  }
1845}
1846
1847SDOperand X86TargetLowering::LowerStdCallCCArguments(SDOperand Op,
1848                                                     SelectionDAG &DAG) {
1849  unsigned NumArgs = Op.Val->getNumValues() - 1;
1850  MachineFunction &MF = DAG.getMachineFunction();
1851  MachineFrameInfo *MFI = MF.getFrameInfo();
1852  SDOperand Root = Op.getOperand(0);
1853  std::vector<SDOperand> ArgValues;
1854
1855  // Add DAG nodes to load the arguments...  On entry to a function on the X86,
1856  // the stack frame looks like this:
1857  //
1858  // [ESP] -- return address
1859  // [ESP + 4] -- first argument (leftmost lexically)
1860  // [ESP + 8] -- second argument, if first argument is <= 4 bytes in size
1861  //    ...
1862  //
1863  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
1864  for (unsigned i = 0; i < NumArgs; ++i) {
1865    MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
1866    unsigned ArgIncrement = 4;
1867    unsigned ObjSize = 0;
1868    HowToPassStdCallCCArgument(ObjectVT, ObjSize);
1869    if (ObjSize > 4)
1870      ArgIncrement = ObjSize;
1871
1872    SDOperand ArgValue;
1873    // Create the frame index object for this incoming parameter...
1874    int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
1875    SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
1876    ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0);
1877    ArgValues.push_back(ArgValue);
1878    ArgOffset += ArgIncrement;   // Move on to the next argument...
1879  }
1880
1881  ArgValues.push_back(Root);
1882
1883  // If the function takes variable number of arguments, make a frame index for
1884  // the start of the first vararg value... for expansion of llvm.va_start.
1885  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
1886  if (isVarArg) {
1887    BytesToPopOnReturn = 0;         // Callee pops nothing.
1888    BytesCallerReserves = ArgOffset;
1889    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
1890  } else {
1891    BytesToPopOnReturn = ArgOffset; // Callee pops everything..
1892    BytesCallerReserves = 0;
1893  }
1894  RegSaveFrameIndex = 0xAAAAAAA;    // X86-64 only.
1895  ReturnAddrIndex = 0;              // No return address slot generated yet.
1896
1897  MF.getInfo<X86FunctionInfo>()->setBytesToPopOnReturn(BytesToPopOnReturn);
1898
1899  // Return the new list of results.
1900  std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
1901                                     Op.Val->value_end());
1902  return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size());
1903}
1904
1905
1906SDOperand X86TargetLowering::LowerStdCallCCCallTo(SDOperand Op,
1907                                                  SelectionDAG &DAG) {
1908  SDOperand Chain     = Op.getOperand(0);
1909  unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();
1910  bool isVarArg       = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
1911  bool isTailCall     = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
1912  SDOperand Callee    = Op.getOperand(4);
1913  MVT::ValueType RetVT= Op.Val->getValueType(0);
1914  unsigned NumOps     = (Op.getNumOperands() - 5) / 2;
1915
1916  // Count how many bytes are to be pushed on the stack.
1917  unsigned NumBytes = 0;
1918  for (unsigned i = 0; i != NumOps; ++i) {
1919    SDOperand Arg = Op.getOperand(5+2*i);
1920
1921    switch (Arg.getValueType()) {
1922    default: assert(0 && "Unexpected ValueType for argument!");
1923    case MVT::i8:
1924    case MVT::i16:
1925    case MVT::i32:
1926    case MVT::f32:
1927      NumBytes += 4;
1928      break;
1929    case MVT::i64:
1930    case MVT::f64:
1931      NumBytes += 8;
1932      break;
1933    }
1934  }
1935
1936  Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
1937
1938  // Arguments go on the stack in reverse order, as specified by the ABI.
1939  unsigned ArgOffset = 0;
1940  std::vector<SDOperand> MemOpChains;
1941  SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
1942  for (unsigned i = 0; i != NumOps; ++i) {
1943    SDOperand Arg = Op.getOperand(5+2*i);
1944
1945    switch (Arg.getValueType()) {
1946    default: assert(0 && "Unexpected ValueType for argument!");
1947    case MVT::i8:
1948    case MVT::i16: {
1949      // Promote the integer to 32 bits.  If the input type is signed use a
1950      // sign extend, otherwise use a zero extend.
      unsigned ExtOp =
        cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue() ?
        ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
1954      Arg = DAG.getNode(ExtOp, MVT::i32, Arg);
1955    }
1956    // Fallthrough
1957
1958    case MVT::i32:
1959    case MVT::f32: {
1960      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
1961      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
1962      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
1963      ArgOffset += 4;
1964      break;
1965    }
1966    case MVT::i64:
1967    case MVT::f64: {
1968      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
1969      PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
1970      MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
1971      ArgOffset += 8;
1972      break;
1973    }
1974    }
1975  }
1976
1977  if (!MemOpChains.empty())
1978    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
1979                        &MemOpChains[0], MemOpChains.size());
1980
1981  // If the callee is a GlobalAddress node (quite common, every direct call is)
1982  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1983  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1984    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
1985  else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
1986    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
1987
1988  std::vector<MVT::ValueType> NodeTys;
1989  NodeTys.push_back(MVT::Other);   // Returns a chain
1990  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
1991  std::vector<SDOperand> Ops;
1992  Ops.push_back(Chain);
1993  Ops.push_back(Callee);
1994
1995  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
1996                      NodeTys, &Ops[0], Ops.size());
1997  SDOperand InFlag = Chain.getValue(1);
1998
1999  // Create the CALLSEQ_END node.
2000  unsigned NumBytesForCalleeToPush;
2001
2002  if (isVarArg) {
2003    NumBytesForCalleeToPush = 0;
2004  } else {
2005    NumBytesForCalleeToPush = NumBytes;
2006  }
2007
2008  NodeTys.clear();
2009  NodeTys.push_back(MVT::Other);   // Returns a chain
2010  if (RetVT != MVT::Other)
2011    NodeTys.push_back(MVT::Flag);  // Returns a flag for retval copy to use.
2012  Ops.clear();
2013  Ops.push_back(Chain);
2014  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
2015  Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy()));
2016  Ops.push_back(InFlag);
2017  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
2018  if (RetVT != MVT::Other)
2019    InFlag = Chain.getValue(1);
2020
2021  std::vector<SDOperand> ResultVals;
2022  NodeTys.clear();
2023  switch (RetVT) {
2024  default: assert(0 && "Unknown value type to return!");
2025  case MVT::Other: break;
2026  case MVT::i8:
2027    Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1);
2028    ResultVals.push_back(Chain.getValue(0));
2029    NodeTys.push_back(MVT::i8);
2030    break;
2031  case MVT::i16:
2032    Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1);
2033    ResultVals.push_back(Chain.getValue(0));
2034    NodeTys.push_back(MVT::i16);
2035    break;
2036  case MVT::i32:
2037    if (Op.Val->getValueType(1) == MVT::i32) {
2038      Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
2039      ResultVals.push_back(Chain.getValue(0));
2040      Chain = DAG.getCopyFromReg(Chain, X86::EDX, MVT::i32,
2041                                 Chain.getValue(2)).getValue(1);
2042      ResultVals.push_back(Chain.getValue(0));
2043      NodeTys.push_back(MVT::i32);
2044    } else {
2045      Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
2046      ResultVals.push_back(Chain.getValue(0));
2047    }
2048    NodeTys.push_back(MVT::i32);
2049    break;
2050  case MVT::f32:
2051  case MVT::f64: {
2052    std::vector<MVT::ValueType> Tys;
2053    Tys.push_back(MVT::f64);
2054    Tys.push_back(MVT::Other);
2055    Tys.push_back(MVT::Flag);
2056    std::vector<SDOperand> Ops;
2057    Ops.push_back(Chain);
2058    Ops.push_back(InFlag);
2059    SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys,
2060                                   &Ops[0], Ops.size());
2061    Chain  = RetVal.getValue(1);
2062    InFlag = RetVal.getValue(2);
2063    if (X86ScalarSSE) {
2064      // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
2065      // shouldn't be necessary except that RFP cannot be live across
2066      // multiple blocks. When stackifier is fixed, they can be uncoupled.
2067      MachineFunction &MF = DAG.getMachineFunction();
2068      int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
2069      SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
2070      Tys.clear();
2071      Tys.push_back(MVT::Other);
2072      Ops.clear();
2073      Ops.push_back(Chain);
2074      Ops.push_back(RetVal);
2075      Ops.push_back(StackSlot);
2076      Ops.push_back(DAG.getValueType(RetVT));
2077      Ops.push_back(InFlag);
2078      Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
2079      RetVal = DAG.getLoad(RetVT, Chain, StackSlot, NULL, 0);
2080      Chain = RetVal.getValue(1);
2081    }
2082
2083    if (RetVT == MVT::f32 && !X86ScalarSSE)
2084      // FIXME: we would really like to remember that this FP_ROUND
2085      // operation is okay to eliminate if we allow excess FP precision.
2086      RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
2087    ResultVals.push_back(RetVal);
2088    NodeTys.push_back(RetVT);
2089    break;
2090  }
2091  }
2092
2093  // If the function returns void, just return the chain.
2094  if (ResultVals.empty())
2095    return Chain;
2096
2097  // Otherwise, merge everything together with a MERGE_VALUES node.
2098  NodeTys.push_back(MVT::Other);
2099  ResultVals.push_back(Chain);
2100  SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
2101                              &ResultVals[0], ResultVals.size());
2102  return Res.getValue(Op.ResNo);
2103}
2104
2105//===----------------------------------------------------------------------===//
2106//                  FastCall Calling Convention implementation
2107//===----------------------------------------------------------------------===//
2108//
// The X86 'fastcall' calling convention passes up to two integer arguments in
// registers (an appropriate portion of ECX/EDX), passes arguments in C order,
// requires that the callee pop its arguments off the stack (allowing proper
// tail calls), and has the same return value conventions as the C calling
// convention.
2113//
2114// This calling convention always arranges for the callee pop value to be 8n+4
2115// bytes, which is needed for tail recursion elimination and stack alignment
2116// reasons.
2117//
2118
/// HowToPassFastCallCCArgument - Returns how a formal argument of the
/// specified type should be passed. If it is passed on the stack, returns the
/// size of the stack slot; if it is passed in integer registers, returns the
/// number of registers needed.
2123static void
2124HowToPassFastCallCCArgument(MVT::ValueType ObjectVT,
2125                            unsigned NumIntRegs,
2126                            unsigned &ObjSize,
2127                            unsigned &ObjIntRegs)
2128{
2129  ObjSize = 0;
2130  ObjIntRegs = 0;
2131
2132  switch (ObjectVT) {
2133  default: assert(0 && "Unhandled argument type!");
  case MVT::i8:
    if (NumIntRegs < 2)
      ObjIntRegs = 1;
    else
      ObjSize = 1;
    break;
  case MVT::i16:
    if (NumIntRegs < 2)
      ObjIntRegs = 1;
    else
      ObjSize = 2;
    break;
  case MVT::i32:
    if (NumIntRegs < 2)
      ObjIntRegs = 1;
    else
      ObjSize = 4;
    break;
  case MVT::i64:
    if (NumIntRegs+2 <= 2) {
      ObjIntRegs = 2;
    } else if (NumIntRegs+1 <= 2) {
      ObjIntRegs = 1;
      ObjSize = 4;
    } else
      ObjSize = 8;
    break;
  case MVT::f32:
    ObjSize = 4;
    break;
  case MVT::f64:
    ObjSize = 8;
    break;
2166  }
2167}
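
// For instance, the first two integer arguments land in ECX and EDX; a third
// i32, or an i64 when only one register remains, spills (at least partly) to
// the stack per the sizes computed above.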
2168
2169SDOperand
2170X86TargetLowering::LowerFastCallCCArguments(SDOperand Op, SelectionDAG &DAG) {
2171  unsigned NumArgs = Op.Val->getNumValues()-1;
2172  MachineFunction &MF = DAG.getMachineFunction();
2173  MachineFrameInfo *MFI = MF.getFrameInfo();
2174  SDOperand Root = Op.getOperand(0);
2175  std::vector<SDOperand> ArgValues;
2176
2177  // Add DAG nodes to load the arguments...  On entry to a function the stack
2178  // frame looks like this:
2179  //
2180  // [ESP] -- return address
2181  // [ESP + 4] -- first nonreg argument (leftmost lexically)
2182  // [ESP + 8] -- second nonreg argument, if 1st argument is <= 4 bytes in size
2183  //    ...
2184  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
2185
  // Keep track of the number of integer regs passed so far.  This can be
  // either 0 (neither ECX nor EDX is used), 1 (ECX is used), or 2 (both ECX
  // and EDX are used).
2189  unsigned NumIntRegs = 0;
2190
2191  for (unsigned i = 0; i < NumArgs; ++i) {
2192    MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
2193    unsigned ArgIncrement = 4;
2194    unsigned ObjSize = 0;
2195    unsigned ObjIntRegs = 0;
2196
2197    HowToPassFastCallCCArgument(ObjectVT, NumIntRegs, ObjSize, ObjIntRegs);
2198    if (ObjSize > 4)
2199      ArgIncrement = ObjSize;
2200
2201    unsigned Reg = 0;
2202    SDOperand ArgValue;
2203    if (ObjIntRegs) {
2204      switch (ObjectVT) {
2205      default: assert(0 && "Unhandled argument type!");
2206      case MVT::i8:
2207        Reg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::CL,
2208                        X86::GR8RegisterClass);
2209        ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i8);
2210        break;
2211      case MVT::i16:
2212        Reg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::CX,
2213                        X86::GR16RegisterClass);
2214        ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i16);
2215        break;
2216      case MVT::i32:
2217        Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::ECX,
2218                        X86::GR32RegisterClass);
2219        ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32);
2220        break;
2221      case MVT::i64:
2222        Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::ECX,
2223                        X86::GR32RegisterClass);
2224        ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32);
2225        if (ObjIntRegs == 2) {
2226          Reg = AddLiveIn(MF, X86::EDX, X86::GR32RegisterClass);
2227          SDOperand ArgValue2 = DAG.getCopyFromReg(Root, Reg, MVT::i32);
2228          ArgValue= DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
2229        }
2230        break;
2231      }
2232
2233      NumIntRegs += ObjIntRegs;
2234    }
2235
2236    if (ObjSize) {
2237      // Create the SelectionDAG nodes corresponding to a load from this
2238      // parameter.
2239      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
2240      SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
2241      if (ObjectVT == MVT::i64 && ObjIntRegs) {
2242        SDOperand ArgValue2 = DAG.getLoad(Op.Val->getValueType(i), Root, FIN,
2243                                          NULL, 0);
2244        ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
2245      } else
2246        ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0);
2247      ArgOffset += ArgIncrement;   // Move on to the next argument.
2248    }
2249
2250    ArgValues.push_back(ArgValue);
2251  }
2252
2253  ArgValues.push_back(Root);
2254
  // Make sure the callee pops 8n+4 bytes so that, once the return address has
  // been pushed, the start of the argument area stays 8-byte aligned.
2257  if ((ArgOffset & 7) == 0)
2258    ArgOffset += 4;
2259
2260  VarArgsFrameIndex = 0xAAAAAAA;   // fastcc functions can't have varargs.
2261  RegSaveFrameIndex = 0xAAAAAAA;   // X86-64 only.
2262  ReturnAddrIndex = 0;             // No return address slot generated yet.
2263  BytesToPopOnReturn = ArgOffset;  // Callee pops all stack arguments.
2264  BytesCallerReserves = 0;
2265
2266  MF.getInfo<X86FunctionInfo>()->setBytesToPopOnReturn(BytesToPopOnReturn);
2267
2268  // Finally, inform the code generator which regs we return values in.
2269  switch (getValueType(MF.getFunction()->getReturnType())) {
2270  default: assert(0 && "Unknown type!");
2271  case MVT::isVoid: break;
2272  case MVT::i1:
2273  case MVT::i8:
2274  case MVT::i16:
2275  case MVT::i32:
2276    MF.addLiveOut(X86::ECX);
2277    break;
2278  case MVT::i64:
2279    MF.addLiveOut(X86::ECX);
2280    MF.addLiveOut(X86::EDX);
2281    break;
2282  case MVT::f32:
2283  case MVT::f64:
2284    MF.addLiveOut(X86::ST0);
2285    break;
2286  }
2287
2288  // Return the new list of results.
2289  std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
2290                                     Op.Val->value_end());
2291  return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size());
2292}
2293
2294SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
2295  if (ReturnAddrIndex == 0) {
2296    // Set up a frame object for the return address.
2297    MachineFunction &MF = DAG.getMachineFunction();
2298    if (Subtarget->is64Bit())
2299      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
2300    else
2301      ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
2302  }
2303
2304  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
2305}
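
// The fixed object created above lives at offset -4 (-8 on x86-64), i.e. the
// slot occupied by the return address that the CALL instruction pushed.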
2306
2309std::pair<SDOperand, SDOperand> X86TargetLowering::
2310LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
2311                        SelectionDAG &DAG) {
2312  SDOperand Result;
2313  if (Depth)        // Depths > 0 not supported yet!
2314    Result = DAG.getConstant(0, getPointerTy());
2315  else {
2316    SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
2317    if (!isFrameAddress)
2318      // Just load the return address
2319      Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI,
2320                           NULL, 0);
2321    else
2322      Result = DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI,
2323                           DAG.getConstant(4, getPointerTy()));
2324  }
2325  return std::make_pair(Result, Chain);
2326}
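
// Note that for the frame-address case the code hands back RetAddrFI - 4,
// the word just below the return address, which is where the caller's saved
// frame pointer would normally live.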
2327
/// translateX86CC - Do a one-to-one translation of an ISD::CondCode to the
/// X86-specific condition code. It returns false if it cannot do a direct
/// translation. X86CC is the translated CondCode.  LHS/RHS are modified as
/// needed.
2332static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
2333                           unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
2334                           SelectionDAG &DAG) {
2335  X86CC = X86::COND_INVALID;
2336  if (!isFP) {
2337    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
2338      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
2339        // X > -1   -> X == 0, jump !sign.
2340        RHS = DAG.getConstant(0, RHS.getValueType());
2341        X86CC = X86::COND_NS;
2342        return true;
2343      } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
2344        // X < 0   -> X == 0, jump on sign.
2345        X86CC = X86::COND_S;
2346        return true;
2347      }
2348    }
2349
2350    switch (SetCCOpcode) {
2351    default: break;
2352    case ISD::SETEQ:  X86CC = X86::COND_E;  break;
2353    case ISD::SETGT:  X86CC = X86::COND_G;  break;
2354    case ISD::SETGE:  X86CC = X86::COND_GE; break;
2355    case ISD::SETLT:  X86CC = X86::COND_L;  break;
2356    case ISD::SETLE:  X86CC = X86::COND_LE; break;
2357    case ISD::SETNE:  X86CC = X86::COND_NE; break;
2358    case ISD::SETULT: X86CC = X86::COND_B;  break;
2359    case ISD::SETUGT: X86CC = X86::COND_A;  break;
2360    case ISD::SETULE: X86CC = X86::COND_BE; break;
2361    case ISD::SETUGE: X86CC = X86::COND_AE; break;
2362    }
2363  } else {
2364    // On a floating point condition, the flags are set as follows:
2365    // ZF  PF  CF   op
2366    //  0 | 0 | 0 | X > Y
2367    //  0 | 0 | 1 | X < Y
2368    //  1 | 0 | 0 | X == Y
2369    //  1 | 1 | 1 | unordered
2370    bool Flip = false;
2371    switch (SetCCOpcode) {
2372    default: break;
2373    case ISD::SETUEQ:
2374    case ISD::SETEQ: X86CC = X86::COND_E;  break;
2375    case ISD::SETOLT: Flip = true; // Fallthrough
2376    case ISD::SETOGT:
2377    case ISD::SETGT: X86CC = X86::COND_A;  break;
2378    case ISD::SETOLE: Flip = true; // Fallthrough
2379    case ISD::SETOGE:
2380    case ISD::SETGE: X86CC = X86::COND_AE; break;
2381    case ISD::SETUGT: Flip = true; // Fallthrough
2382    case ISD::SETULT:
2383    case ISD::SETLT: X86CC = X86::COND_B;  break;
2384    case ISD::SETUGE: Flip = true; // Fallthrough
2385    case ISD::SETULE:
2386    case ISD::SETLE: X86CC = X86::COND_BE; break;
2387    case ISD::SETONE:
2388    case ISD::SETNE: X86CC = X86::COND_NE; break;
2389    case ISD::SETUO: X86CC = X86::COND_P;  break;
2390    case ISD::SETO:  X86CC = X86::COND_NP; break;
2391    }
2392    if (Flip)
2393      std::swap(LHS, RHS);
2394  }
2395
2396  return X86CC != X86::COND_INVALID;
2397}
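
// For example, an integer SETGT maps to COND_G while a floating point SETGT
// maps to COND_A, since FP compares set the flags like an unsigned compare
// (see the flag table above); SETOLT swaps LHS/RHS and also uses COND_A.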
2398
/// hasFPCMov - Is there a floating point cmov for the specific X86 condition
/// code? The current X86 ISA includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
2402static bool hasFPCMov(unsigned X86CC) {
2403  switch (X86CC) {
2404  default:
2405    return false;
2406  case X86::COND_B:
2407  case X86::COND_BE:
2408  case X86::COND_E:
2409  case X86::COND_P:
2410  case X86::COND_A:
2411  case X86::COND_AE:
2412  case X86::COND_NE:
2413  case X86::COND_NP:
2414    return true;
2415  }
2416}
2417
2418/// DarwinGVRequiresExtraLoad - true if accessing the GV requires an extra
2419/// load. For Darwin, external and weak symbols are indirect, loading the value
/// at address GV rather than the value of GV itself. This means that the
2421/// GlobalAddress must be in the base or index register of the address, not the
2422/// GV offset field.
2423static bool DarwinGVRequiresExtraLoad(GlobalValue *GV) {
2424  return (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
2425          (GV->isExternal() && !GV->hasNotBeenReadFromBytecode()));
2426}
2427
2428/// WindowsGVRequiresExtraLoad - true if accessing the GV requires an extra
2429/// load. For Windows, dllimported symbols are indirect, loading the value at
/// address GV rather than the value of GV itself. This means that the
2431/// GlobalAddress must be in the base or index register of the address, not the
2432/// GV offset field.
2433static bool WindowsGVRequiresExtraLoad(GlobalValue *GV) {
2434  return (GV->hasDLLImportLinkage());
2435}
2436
2437/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if its value falls within the range [Low, Hi).
2439static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
2440  if (Op.getOpcode() == ISD::UNDEF)
2441    return true;
2442
2443  unsigned Val = cast<ConstantSDNode>(Op)->getValue();
2444  return (Val >= Low && Val < Hi);
2445}
2446
2447/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if its value is equal to the specified value.
2449static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
2450  if (Op.getOpcode() == ISD::UNDEF)
2451    return true;
2452  return cast<ConstantSDNode>(Op)->getValue() == Val;
2453}
2454
2455/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
2456/// specifies a shuffle of elements that is suitable for input to PSHUFD.
2457bool X86::isPSHUFDMask(SDNode *N) {
2458  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2459
2460  if (N->getNumOperands() != 4)
2461    return false;
2462
  // Check that the mask doesn't reference the second vector.
2464  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2465    SDOperand Arg = N->getOperand(i);
2466    if (Arg.getOpcode() == ISD::UNDEF) continue;
2467    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2468    if (cast<ConstantSDNode>(Arg)->getValue() >= 4)
2469      return false;
2470  }
2471
2472  return true;
2473}
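
// e.g. <2, 1, undef, 0> is a valid PSHUFD mask, while <0, 1, 4, 5> is not:
// elements 4 and 5 would reference the second vector.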
2474
2475/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
2476/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
2477bool X86::isPSHUFHWMask(SDNode *N) {
2478  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2479
2480  if (N->getNumOperands() != 8)
2481    return false;
2482
2483  // Lower quadword copied in order.
2484  for (unsigned i = 0; i != 4; ++i) {
2485    SDOperand Arg = N->getOperand(i);
2486    if (Arg.getOpcode() == ISD::UNDEF) continue;
2487    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2488    if (cast<ConstantSDNode>(Arg)->getValue() != i)
2489      return false;
2490  }
2491
2492  // Upper quadword shuffled.
2493  for (unsigned i = 4; i != 8; ++i) {
2494    SDOperand Arg = N->getOperand(i);
2495    if (Arg.getOpcode() == ISD::UNDEF) continue;
2496    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2497    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2498    if (Val < 4 || Val > 7)
2499      return false;
2500  }
2501
2502  return true;
2503}
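
// e.g. <0, 1, 2, 3, 7, 6, 5, 4> is a valid PSHUFHW mask: the low quadword is
// copied in order and only elements 4-7 are shuffled.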
2504
2505/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
2506/// specifies a shuffle of elements that is suitable for input to PSHUFLW.
2507bool X86::isPSHUFLWMask(SDNode *N) {
2508  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2509
2510  if (N->getNumOperands() != 8)
2511    return false;
2512
2513  // Upper quadword copied in order.
2514  for (unsigned i = 4; i != 8; ++i)
2515    if (!isUndefOrEqual(N->getOperand(i), i))
2516      return false;
2517
2518  // Lower quadword shuffled.
2519  for (unsigned i = 0; i != 4; ++i)
2520    if (!isUndefOrInRange(N->getOperand(i), 0, 4))
2521      return false;
2522
2523  return true;
2524}
2525
2526/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
2527/// specifies a shuffle of elements that is suitable for input to SHUFP*.
2528static bool isSHUFPMask(std::vector<SDOperand> &N) {
2529  unsigned NumElems = N.size();
2530  if (NumElems != 2 && NumElems != 4) return false;
2531
2532  unsigned Half = NumElems / 2;
2533  for (unsigned i = 0; i < Half; ++i)
2534    if (!isUndefOrInRange(N[i], 0, NumElems))
2535      return false;
2536  for (unsigned i = Half; i < NumElems; ++i)
2537    if (!isUndefOrInRange(N[i], NumElems, NumElems*2))
2538      return false;
2539
2540  return true;
2541}
2542
2543bool X86::isSHUFPMask(SDNode *N) {
2544  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2545  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
2546  return ::isSHUFPMask(Ops);
2547}
2548
/// isCommutedSHUFP - Returns true if the shuffle mask is exactly
/// the reverse of what X86 shuffles want. X86 shuffles require the lower
/// half elements to come from vector 1 (which would equal the dest.) and
/// the upper half to come from vector 2.
2553static bool isCommutedSHUFP(std::vector<SDOperand> &Ops) {
2554  unsigned NumElems = Ops.size();
2555  if (NumElems != 2 && NumElems != 4) return false;
2556
2557  unsigned Half = NumElems / 2;
2558  for (unsigned i = 0; i < Half; ++i)
2559    if (!isUndefOrInRange(Ops[i], NumElems, NumElems*2))
2560      return false;
2561  for (unsigned i = Half; i < NumElems; ++i)
2562    if (!isUndefOrInRange(Ops[i], 0, NumElems))
2563      return false;
2564  return true;
2565}
2566
2567static bool isCommutedSHUFP(SDNode *N) {
2568  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2569  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
2570  return isCommutedSHUFP(Ops);
2571}
2572
2573/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
2574/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
2575bool X86::isMOVHLPSMask(SDNode *N) {
2576  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2577
2578  if (N->getNumOperands() != 4)
2579    return false;
2580
2581  // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
2582  return isUndefOrEqual(N->getOperand(0), 6) &&
2583         isUndefOrEqual(N->getOperand(1), 7) &&
2584         isUndefOrEqual(N->getOperand(2), 2) &&
2585         isUndefOrEqual(N->getOperand(3), 3);
2586}
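
// e.g. the 4-element mask <6, 7, 2, 3> takes its low half from the high half
// of V2 and leaves the high half of V1 in place, matching what MOVHLPS does.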
2587
2588/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
2589/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
2590bool X86::isMOVLPMask(SDNode *N) {
2591  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2592
2593  unsigned NumElems = N->getNumOperands();
2594  if (NumElems != 2 && NumElems != 4)
2595    return false;
2596
2597  for (unsigned i = 0; i < NumElems/2; ++i)
2598    if (!isUndefOrEqual(N->getOperand(i), i + NumElems))
2599      return false;
2600
2601  for (unsigned i = NumElems/2; i < NumElems; ++i)
2602    if (!isUndefOrEqual(N->getOperand(i), i))
2603      return false;
2604
2605  return true;
2606}
2607
2608/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
2609/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
2610/// and MOVLHPS.
2611bool X86::isMOVHPMask(SDNode *N) {
2612  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2613
2614  unsigned NumElems = N->getNumOperands();
2615  if (NumElems != 2 && NumElems != 4)
2616    return false;
2617
2618  for (unsigned i = 0; i < NumElems/2; ++i)
2619    if (!isUndefOrEqual(N->getOperand(i), i))
2620      return false;
2621
2622  for (unsigned i = 0; i < NumElems/2; ++i) {
2623    SDOperand Arg = N->getOperand(i + NumElems/2);
2624    if (!isUndefOrEqual(Arg, i + NumElems))
2625      return false;
2626  }
2627
2628  return true;
2629}
2630
2631/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
2632/// specifies a shuffle of elements that is suitable for input to UNPCKL.
static bool isUNPCKLMask(std::vector<SDOperand> &N, bool V2IsSplat = false) {
2634  unsigned NumElems = N.size();
2635  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
2636    return false;
2637
2638  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
2639    SDOperand BitI  = N[i];
2640    SDOperand BitI1 = N[i+1];
2641    if (!isUndefOrEqual(BitI, j))
2642      return false;
2643    if (V2IsSplat) {
2644      if (isUndefOrEqual(BitI1, NumElems))
2645        return false;
2646    } else {
2647      if (!isUndefOrEqual(BitI1, j + NumElems))
2648        return false;
2649    }
2650  }
2651
2652  return true;
2653}
2654
2655bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) {
2656  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2657  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
2658  return ::isUNPCKLMask(Ops, V2IsSplat);
2659}
2660
2661/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
2662/// specifies a shuffle of elements that is suitable for input to UNPCKH.
static bool isUNPCKHMask(std::vector<SDOperand> &N, bool V2IsSplat = false) {
2664  unsigned NumElems = N.size();
2665  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
2666    return false;
2667
2668  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
2669    SDOperand BitI  = N[i];
2670    SDOperand BitI1 = N[i+1];
2671    if (!isUndefOrEqual(BitI, j + NumElems/2))
2672      return false;
2673    if (V2IsSplat) {
2674      if (isUndefOrEqual(BitI1, NumElems))
2675        return false;
2676    } else {
2677      if (!isUndefOrEqual(BitI1, j + NumElems/2 + NumElems))
2678        return false;
2679    }
2680  }
2681
2682  return true;
2683}
2684
2685bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) {
2686  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2687  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
2688  return ::isUNPCKHMask(Ops, V2IsSplat);
2689}
2690
2691/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
2692/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
2693/// <0, 0, 1, 1>
2694bool X86::isUNPCKL_v_undef_Mask(SDNode *N) {
2695  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2696
2697  unsigned NumElems = N->getNumOperands();
2698  if (NumElems != 4 && NumElems != 8 && NumElems != 16)
2699    return false;
2700
2701  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
2702    SDOperand BitI  = N->getOperand(i);
2703    SDOperand BitI1 = N->getOperand(i+1);
2704
2705    if (!isUndefOrEqual(BitI, j))
2706      return false;
2707    if (!isUndefOrEqual(BitI1, j))
2708      return false;
2709  }
2710
2711  return true;
2712}
2713
2714/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
2715/// specifies a shuffle of elements that is suitable for input to MOVSS,
2716/// MOVSD, and MOVD, i.e. setting the lowest element.
2717static bool isMOVLMask(std::vector<SDOperand> &N) {
2718  unsigned NumElems = N.size();
2719  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
2720    return false;
2721
2722  if (!isUndefOrEqual(N[0], NumElems))
2723    return false;
2724
2725  for (unsigned i = 1; i < NumElems; ++i) {
2726    SDOperand Arg = N[i];
2727    if (!isUndefOrEqual(Arg, i))
2728      return false;
2729  }
2730
2731  return true;
2732}
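
// e.g. <4, 1, 2, 3> on a 4-element vector: the new lowest element comes from
// V2 and the remaining elements are kept from V1 in order.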
2733
2734bool X86::isMOVLMask(SDNode *N) {
2735  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2736  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
2737  return ::isMOVLMask(Ops);
2738}
2739
/// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
/// of what X86 movss wants: the lowest element must be the lowest element
/// of vector 2, and the other elements must come from vector 1 in order.
2743static bool isCommutedMOVL(std::vector<SDOperand> &Ops, bool V2IsSplat = false,
2744                           bool V2IsUndef = false) {
2745  unsigned NumElems = Ops.size();
2746  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
2747    return false;
2748
2749  if (!isUndefOrEqual(Ops[0], 0))
2750    return false;
2751
2752  for (unsigned i = 1; i < NumElems; ++i) {
2753    SDOperand Arg = Ops[i];
2754    if (!(isUndefOrEqual(Arg, i+NumElems) ||
2755          (V2IsUndef && isUndefOrInRange(Arg, NumElems, NumElems*2)) ||
2756          (V2IsSplat && isUndefOrEqual(Arg, NumElems))))
2757      return false;
2758  }
2759
2760  return true;
2761}
2762
2763static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
2764                           bool V2IsUndef = false) {
2765  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2766  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
2767  return isCommutedMOVL(Ops, V2IsSplat, V2IsUndef);
2768}
2769
2770/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2771/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
2772bool X86::isMOVSHDUPMask(SDNode *N) {
2773  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2774
2775  if (N->getNumOperands() != 4)
2776    return false;
2777
2778  // Expect 1, 1, 3, 3
2779  for (unsigned i = 0; i < 2; ++i) {
2780    SDOperand Arg = N->getOperand(i);
2781    if (Arg.getOpcode() == ISD::UNDEF) continue;
2782    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2783    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2784    if (Val != 1) return false;
2785  }
2786
2787  bool HasHi = false;
2788  for (unsigned i = 2; i < 4; ++i) {
2789    SDOperand Arg = N->getOperand(i);
2790    if (Arg.getOpcode() == ISD::UNDEF) continue;
2791    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2792    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2793    if (Val != 3) return false;
2794    HasHi = true;
2795  }
2796
2797  // Don't use movshdup if it can be done with a shufps.
2798  return HasHi;
2799}
2800
2801/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
2802/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
2803bool X86::isMOVSLDUPMask(SDNode *N) {
2804  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2805
2806  if (N->getNumOperands() != 4)
2807    return false;
2808
2809  // Expect 0, 0, 2, 2
2810  for (unsigned i = 0; i < 2; ++i) {
2811    SDOperand Arg = N->getOperand(i);
2812    if (Arg.getOpcode() == ISD::UNDEF) continue;
2813    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2814    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2815    if (Val != 0) return false;
2816  }
2817
2818  bool HasHi = false;
2819  for (unsigned i = 2; i < 4; ++i) {
2820    SDOperand Arg = N->getOperand(i);
2821    if (Arg.getOpcode() == ISD::UNDEF) continue;
2822    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2823    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2824    if (Val != 2) return false;
2825    HasHi = true;
2826  }
2827
  // Don't use movsldup if it can be done with a shufps.
2829  return HasHi;
2830}
2831
2832/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
2833/// a splat of a single element.
2834static bool isSplatMask(SDNode *N) {
2835  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2836
2837  // This is a splat operation if each element of the permute is the same, and
2838  // if the value doesn't reference the second vector.
2839  unsigned NumElems = N->getNumOperands();
2840  SDOperand ElementBase;
2841  unsigned i = 0;
2842  for (; i != NumElems; ++i) {
2843    SDOperand Elt = N->getOperand(i);
    if (isa<ConstantSDNode>(Elt)) {
2845      ElementBase = Elt;
2846      break;
2847    }
2848  }
2849
2850  if (!ElementBase.Val)
2851    return false;
2852
2853  for (; i != NumElems; ++i) {
2854    SDOperand Arg = N->getOperand(i);
2855    if (Arg.getOpcode() == ISD::UNDEF) continue;
2856    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2857    if (Arg != ElementBase) return false;
2858  }
2859
2860  // Make sure it is a splat of the first vector operand.
2861  return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
2862}
2863
/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element and the mask is 2 or 4 elements wide.
2866bool X86::isSplatMask(SDNode *N) {
2867  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2868
  // We can only splat 64-bit and 32-bit quantities with a single instruction.
2870  if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
2871    return false;
2872  return ::isSplatMask(N);
2873}
2874
/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of element zero.
2877bool X86::isSplatLoMask(SDNode *N) {
2878  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2879
2880  for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
2881    if (!isUndefOrEqual(N->getOperand(i), 0))
2882      return false;
2883  return true;
2884}
2885
2886/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
2887/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
2888/// instructions.
2889unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
2890  unsigned NumOperands = N->getNumOperands();
2891  unsigned Shift = (NumOperands == 4) ? 2 : 1;
2892  unsigned Mask = 0;
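  // Build the immediate by scanning the mask from the highest element down:
  // each element contributes Shift bits, so element 0 ends up in the
  // low-order bits of the immediate.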
2893  for (unsigned i = 0; i < NumOperands; ++i) {
2894    unsigned Val = 0;
2895    SDOperand Arg = N->getOperand(NumOperands-i-1);
2896    if (Arg.getOpcode() != ISD::UNDEF)
2897      Val = cast<ConstantSDNode>(Arg)->getValue();
2898    if (Val >= NumOperands) Val -= NumOperands;
2899    Mask |= Val;
2900    if (i != NumOperands - 1)
2901      Mask <<= Shift;
2902  }
2903
2904  return Mask;
2905}
2906
2907/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
2908/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
2909/// instructions.
2910unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
2911  unsigned Mask = 0;
2912  // 8 nodes, but we only care about the last 4.
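  // Elements 4-7 each encode as a 2-bit value (Val - 4), packed highest
  // element first so element 4 lands in the low two bits.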
2913  for (unsigned i = 7; i >= 4; --i) {
    unsigned Val = 4;   // Treat undef as element 4 so it encodes as 0.
2915    SDOperand Arg = N->getOperand(i);
2916    if (Arg.getOpcode() != ISD::UNDEF)
2917      Val = cast<ConstantSDNode>(Arg)->getValue();
2918    Mask |= (Val - 4);
2919    if (i != 4)
2920      Mask <<= 2;
2921  }
2922
2923  return Mask;
2924}
2925
2926/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
2927/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
2928/// instructions.
2929unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
2930  unsigned Mask = 0;
2931  // 8 nodes, but we only care about the first 4.
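  // Elements 0-3 each encode as a 2-bit value, packed highest element first
  // so element 0 lands in the low two bits.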
2932  for (int i = 3; i >= 0; --i) {
2933    unsigned Val = 0;
2934    SDOperand Arg = N->getOperand(i);
2935    if (Arg.getOpcode() != ISD::UNDEF)
2936      Val = cast<ConstantSDNode>(Arg)->getValue();
2937    Mask |= Val;
2938    if (i != 0)
2939      Mask <<= 2;
2940  }
2941
2942  return Mask;
2943}
2944
2945/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
2946/// specifies a 8 element shuffle that can be broken into a pair of
2947/// PSHUFHW and PSHUFLW.
2948static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
2949  assert(N->getOpcode() == ISD::BUILD_VECTOR);
2950
2951  if (N->getNumOperands() != 8)
2952    return false;
2953
2954  // Lower quadword shuffled.
2955  for (unsigned i = 0; i != 4; ++i) {
2956    SDOperand Arg = N->getOperand(i);
2957    if (Arg.getOpcode() == ISD::UNDEF) continue;
2958    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2959    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= 4)
2961      return false;
2962  }
2963
2964  // Upper quadword shuffled.
2965  for (unsigned i = 4; i != 8; ++i) {
2966    SDOperand Arg = N->getOperand(i);
2967    if (Arg.getOpcode() == ISD::UNDEF) continue;
2968    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2969    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2970    if (Val < 4 || Val > 7)
2971      return false;
2972  }
2973
2974  return true;
2975}
2976
/// CommuteVectorShuffle - Swap vector_shuffle operands as well as the values
/// in the permute mask.
2979static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1,
2980                                      SDOperand &V2, SDOperand &Mask,
2981                                      SelectionDAG &DAG) {
2982  MVT::ValueType VT = Op.getValueType();
2983  MVT::ValueType MaskVT = Mask.getValueType();
2984  MVT::ValueType EltVT = MVT::getVectorBaseType(MaskVT);
2985  unsigned NumElems = Mask.getNumOperands();
2986  std::vector<SDOperand> MaskVec;
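  // Mask indices < NumElems select from V1 and indices >= NumElems select
  // from V2, so swapping the vectors means moving every defined index across
  // the NumElems boundary.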
2987
2988  for (unsigned i = 0; i != NumElems; ++i) {
2989    SDOperand Arg = Mask.getOperand(i);
2990    if (Arg.getOpcode() == ISD::UNDEF) {
2991      MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
2992      continue;
2993    }
2994    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2995    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
2996    if (Val < NumElems)
2997      MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
2998    else
2999      MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
3000  }
3001
3002  std::swap(V1, V2);
3003  Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
3004  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
3005}
3006
3007/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
3008/// match movhlps. The lower half elements should come from upper half of
3009/// V1 (and in order), and the upper half elements should come from the upper
3010/// half of V2 (and in order).
3011static bool ShouldXformToMOVHLPS(SDNode *Mask) {
3012  unsigned NumElems = Mask->getNumOperands();
3013  if (NumElems != 4)
3014    return false;
3015  for (unsigned i = 0, e = 2; i != e; ++i)
3016    if (!isUndefOrEqual(Mask->getOperand(i), i+2))
3017      return false;
3018  for (unsigned i = 2; i != 4; ++i)
3019    if (!isUndefOrEqual(Mask->getOperand(i), i+4))
3020      return false;
3021  return true;
3022}
3023
3024/// isScalarLoadToVector - Returns true if the node is a scalar load that
3025/// is promoted to a vector.
3026static inline bool isScalarLoadToVector(SDNode *N) {
3027  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
3028    N = N->getOperand(0).Val;
3029    return ISD::isNON_EXTLoad(N);
3030  }
3031  return false;
3032}
3033
3034/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
3035/// match movlp{s|d}. The lower half elements should come from lower half of
3036/// V1 (and in order), and the upper half elements should come from the upper
3037/// half of V2 (and in order). And since V1 will become the source of the
3038/// MOVLP, it must be either a vector load or a scalar load to vector.
3039static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
3040  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
3041    return false;
  // If V2 is a vector load, don't do this transformation. We would rather
  // fold the load into a shufps.
3044  if (ISD::isNON_EXTLoad(V2))
3045    return false;
3046
3047  unsigned NumElems = Mask->getNumOperands();
3048  if (NumElems != 2 && NumElems != 4)
3049    return false;
3050  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
3051    if (!isUndefOrEqual(Mask->getOperand(i), i))
3052      return false;
3053  for (unsigned i = NumElems/2; i != NumElems; ++i)
3054    if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
3055      return false;
3056  return true;
3057}
3058
3059/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
3060/// all the same.
3061static bool isSplatVector(SDNode *N) {
3062  if (N->getOpcode() != ISD::BUILD_VECTOR)
3063    return false;
3064
3065  SDOperand SplatValue = N->getOperand(0);
3066  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
3067    if (N->getOperand(i) != SplatValue)
3068      return false;
3069  return true;
3070}
3071
3072/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
3073/// to an undef.
3074static bool isUndefShuffle(SDNode *N) {
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
3076    return false;
3077
3078  SDOperand V1 = N->getOperand(0);
3079  SDOperand V2 = N->getOperand(1);
3080  SDOperand Mask = N->getOperand(2);
3081  unsigned NumElems = Mask.getNumOperands();
3082  for (unsigned i = 0; i != NumElems; ++i) {
3083    SDOperand Arg = Mask.getOperand(i);
3084    if (Arg.getOpcode() != ISD::UNDEF) {
3085      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
3086      if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
3087        return false;
3088      else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
3089        return false;
3090    }
3091  }
3092  return true;
3093}
3094
/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 point to its first element.
3097static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) {
3098  assert(Mask.getOpcode() == ISD::BUILD_VECTOR);
3099
3100  bool Changed = false;
3101  std::vector<SDOperand> MaskVec;
3102  unsigned NumElems = Mask.getNumOperands();
3103  for (unsigned i = 0; i != NumElems; ++i) {
3104    SDOperand Arg = Mask.getOperand(i);
3105    if (Arg.getOpcode() != ISD::UNDEF) {
3106      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
3107      if (Val > NumElems) {
3108        Arg = DAG.getConstant(NumElems, Arg.getValueType());
3109        Changed = true;
3110      }
3111    }
3112    MaskVec.push_back(Arg);
3113  }
3114
3115  if (Changed)
3116    Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(),
3117                       &MaskVec[0], MaskVec.size());
3118  return Mask;
3119}
3120
/// getMOVLMask - Returns a vector_shuffle mask for a movs{s|d}, movd
/// operation of specified width.
3123static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
3124  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
3125  MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
3126
3127  std::vector<SDOperand> MaskVec;
3128  MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
3129  for (unsigned i = 1; i != NumElems; ++i)
3130    MaskVec.push_back(DAG.getConstant(i, BaseVT));
3131  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
3132}
3133
3134/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
3135/// of specified width.
3136static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
3137  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
3138  MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
3139  std::vector<SDOperand> MaskVec;
3140  for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
3141    MaskVec.push_back(DAG.getConstant(i,            BaseVT));
3142    MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
3143  }
3144  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
3145}
3146
3147/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
3148/// of specified width.
3149static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
3150  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
3151  MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
3152  unsigned Half = NumElems/2;
3153  std::vector<SDOperand> MaskVec;
3154  for (unsigned i = 0; i != Half; ++i) {
3155    MaskVec.push_back(DAG.getConstant(i + Half,            BaseVT));
3156    MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
3157  }
3158  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
3159}
3160
3161/// getZeroVector - Returns a vector of specified type with all zero elements.
3162///
3163static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
3164  assert(MVT::isVector(VT) && "Expected a vector type");
3165  unsigned NumElems = getVectorNumElements(VT);
3166  MVT::ValueType EVT = MVT::getVectorBaseType(VT);
3167  bool isFP = MVT::isFloatingPoint(EVT);
3168  SDOperand Zero = isFP ? DAG.getConstantFP(0.0, EVT) : DAG.getConstant(0, EVT);
3169  std::vector<SDOperand> ZeroVec(NumElems, Zero);
3170  return DAG.getNode(ISD::BUILD_VECTOR, VT, &ZeroVec[0], ZeroVec.size());
3171}
3172
3173/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32.
3174///
3175static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) {
3176  SDOperand V1 = Op.getOperand(0);
3177  SDOperand Mask = Op.getOperand(2);
3178  MVT::ValueType VT = Op.getValueType();
3179  unsigned NumElems = Mask.getNumOperands();
3180  Mask = getUnpacklMask(NumElems, DAG);
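  // Each unpack of the vector with itself doubles the element width while
  // keeping the low elements duplicated; repeat until the value can be
  // viewed as a v4i32, then splat element zero with an all-zeros mask.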
3181  while (NumElems != 4) {
3182    V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
3183    NumElems >>= 1;
3184  }
3185  V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1);
3186
3187  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
3188  Mask = getZeroVector(MaskVT, DAG);
3189  SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1,
3190                                  DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask);
3191  return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
3192}
3193
3194/// isZeroNode - Returns true if Elt is a constant zero or a floating point
3195/// constant +0.0.
3196static inline bool isZeroNode(SDOperand Elt) {
3197  return ((isa<ConstantSDNode>(Elt) &&
3198           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
3199          (isa<ConstantFPSDNode>(Elt) &&
3200           cast<ConstantFPSDNode>(Elt)->isExactlyValue(0.0)));
3201}
3202
/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of a zero or undef
/// vector with element zero of V2 inserted at position Idx.
3205static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT,
3206                                             unsigned NumElems, unsigned Idx,
3207                                             bool isZero, SelectionDAG &DAG) {
3208  SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
3209  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
3210  MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT);
3211  SDOperand Zero = DAG.getConstant(0, EVT);
3212  std::vector<SDOperand> MaskVec(NumElems, Zero);
3213  MaskVec[Idx] = DAG.getConstant(NumElems, EVT);
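  // Every element selects element zero of V1 (the zero or undef vector)
  // except element Idx, which selects element zero of V2.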
3214  SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3215                               &MaskVec[0], MaskVec.size());
3216  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
3217}
3218
3219/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
3220///
3221static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros,
3222                                       unsigned NumNonZero, unsigned NumZero,
3223                                       SelectionDAG &DAG, TargetLowering &TLI) {
3224  if (NumNonZero > 8)
3225    return SDOperand();
3226
3227  SDOperand V(0, 0);
3228  bool First = true;
3229  for (unsigned i = 0; i < 16; ++i) {
3230    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
3231    if (ThisIsNonZero && First) {
3232      if (NumZero)
3233        V = getZeroVector(MVT::v8i16, DAG);
3234      else
3235        V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
3236      First = false;
3237    }
3238
3239    if ((i & 1) != 0) {
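      // There is no byte-sized insert instruction, so combine this byte with
      // the previous one into a 16-bit word and insert both with one pinsrw.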
3240      SDOperand ThisElt(0, 0), LastElt(0, 0);
3241      bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
3242      if (LastIsNonZero) {
3243        LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1));
3244      }
3245      if (ThisIsNonZero) {
3246        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i));
3247        ThisElt = DAG.getNode(ISD::SHL, MVT::i16,
3248                              ThisElt, DAG.getConstant(8, MVT::i8));
3249        if (LastIsNonZero)
3250          ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt);
3251      } else
3252        ThisElt = LastElt;
3253
3254      if (ThisElt.Val)
3255        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt,
3256                        DAG.getConstant(i/2, TLI.getPointerTy()));
3257    }
3258  }
3259
3260  return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V);
3261}
3262
/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
3264///
3265static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros,
3266                                       unsigned NumNonZero, unsigned NumZero,
3267                                       SelectionDAG &DAG, TargetLowering &TLI) {
3268  if (NumNonZero > 4)
3269    return SDOperand();
3270
3271  SDOperand V(0, 0);
3272  bool First = true;
3273  for (unsigned i = 0; i < 8; ++i) {
3274    bool isNonZero = (NonZeros & (1 << i)) != 0;
3275    if (isNonZero) {
3276      if (First) {
3277        if (NumZero)
3278          V = getZeroVector(MVT::v8i16, DAG);
3279        else
3280          V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
3281        First = false;
3282      }
3283      V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i),
3284                      DAG.getConstant(i, TLI.getPointerTy()));
3285    }
3286  }
3287
3288  return V;
3289}
3290
3291SDOperand
3292X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  // All zeros are handled with pxor.
3294  if (ISD::isBuildVectorAllZeros(Op.Val))
3295    return Op;
3296
  // All ones are handled with pcmpeqd.
3298  if (ISD::isBuildVectorAllOnes(Op.Val))
3299    return Op;
3300
3301  MVT::ValueType VT = Op.getValueType();
3302  MVT::ValueType EVT = MVT::getVectorBaseType(VT);
3303  unsigned EVTBits = MVT::getSizeInBits(EVT);
3304
3305  unsigned NumElems = Op.getNumOperands();
3306  unsigned NumZero  = 0;
3307  unsigned NumNonZero = 0;
3308  unsigned NonZeros = 0;
3309  std::set<SDOperand> Values;
3310  for (unsigned i = 0; i < NumElems; ++i) {
3311    SDOperand Elt = Op.getOperand(i);
3312    if (Elt.getOpcode() != ISD::UNDEF) {
3313      Values.insert(Elt);
3314      if (isZeroNode(Elt))
3315        NumZero++;
3316      else {
3317        NonZeros |= (1 << i);
3318        NumNonZero++;
3319      }
3320    }
3321  }
3322
3323  if (NumNonZero == 0)
3324    // Must be a mix of zero and undef. Return a zero vector.
3325    return getZeroVector(VT, DAG);
3326
  // Splat is obviously ok. Let the legalizer expand it to a shuffle.
3328  if (Values.size() == 1)
3329    return SDOperand();
3330
3331  // Special case for single non-zero element.
3332  if (NumNonZero == 1) {
3333    unsigned Idx = CountTrailingZeros_32(NonZeros);
3334    SDOperand Item = Op.getOperand(Idx);
3335    Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);
3336    if (Idx == 0)
3337      // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
3338      return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx,
3339                                         NumZero > 0, DAG);
3340
3341    if (EVTBits == 32) {
3342      // Turn it into a shuffle of zero and zero-extended scalar to vector.
3343      Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0,
3344                                         DAG);
3345      MVT::ValueType MaskVT  = MVT::getIntVectorWithNumElements(NumElems);
3346      MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
3347      std::vector<SDOperand> MaskVec;
3348      for (unsigned i = 0; i < NumElems; i++)
3349        MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
3350      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3351                                   &MaskVec[0], MaskVec.size());
3352      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item,
3353                         DAG.getNode(ISD::UNDEF, VT), Mask);
3354    }
3355  }
3356
  // Let the legalizer expand 2-wide build_vectors.
3358  if (EVTBits == 64)
3359    return SDOperand();
3360
3361  // If element VT is < 32 bits, convert it to inserts into a zero vector.
3362  if (EVTBits == 8) {
    SDOperand V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
                                        DAG, *this);
3365    if (V.Val) return V;
3366  }
3367
3368  if (EVTBits == 16) {
    SDOperand V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
                                        DAG, *this);
3371    if (V.Val) return V;
3372  }
3373
3374  // If element VT is == 32 bits, turn it into a number of shuffles.
3375  std::vector<SDOperand> V(NumElems);
3376  if (NumElems == 4 && NumZero > 0) {
3377    for (unsigned i = 0; i < 4; ++i) {
3378      bool isZero = !(NonZeros & (1 << i));
3379      if (isZero)
3380        V[i] = getZeroVector(VT, DAG);
3381      else
3382        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
3383    }
3384
3385    for (unsigned i = 0; i < 2; ++i) {
3386      switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
3387        default: break;
3388        case 0:
3389          V[i] = V[i*2];  // Must be a zero vector.
3390          break;
3391        case 1:
3392          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2],
3393                             getMOVLMask(NumElems, DAG));
3394          break;
3395        case 2:
3396          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
3397                             getMOVLMask(NumElems, DAG));
3398          break;
3399        case 3:
3400          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
3401                             getUnpacklMask(NumElems, DAG));
3402          break;
3403      }
3404    }
3405
3406    // Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. movd)
3407    // clears the upper bits.
3408    // FIXME: we can do the same for v4f32 case when we know both parts of
3409    // the lower half come from scalar_to_vector (loadf32). We should do
3410    // that in post legalizer dag combiner with target specific hooks.
3411    if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0)
3412      return V[0];
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
    MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
    std::vector<SDOperand> MaskVec;
    bool Reverse = (NonZeros & 0x3) == 2;
    for (unsigned i = 0; i < 2; ++i)
      if (Reverse)
        MaskVec.push_back(DAG.getConstant(1-i, MaskEVT));
      else
        MaskVec.push_back(DAG.getConstant(i, MaskEVT));
    Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
    for (unsigned i = 0; i < 2; ++i)
      if (Reverse)
        MaskVec.push_back(DAG.getConstant(1-i+NumElems, MaskEVT));
      else
        MaskVec.push_back(DAG.getConstant(i+NumElems, MaskEVT));
3428    SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3429                                     &MaskVec[0], MaskVec.size());
3430    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask);
3431  }
3432
3433  if (Values.size() > 2) {
3434    // Expand into a number of unpckl*.
3435    // e.g. for v4f32
3436    //   Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
3437    //         : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
3438    //   Step 2: unpcklps X, Y ==>    <3, 2, 1, 0>
3439    SDOperand UnpckMask = getUnpacklMask(NumElems, DAG);
3440    for (unsigned i = 0; i < NumElems; ++i)
3441      V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
3442    NumElems >>= 1;
3443    while (NumElems != 0) {
3444      for (unsigned i = 0; i < NumElems; ++i)
3445        V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems],
3446                           UnpckMask);
3447      NumElems >>= 1;
3448    }
3449    return V[0];
3450  }
3451
3452  return SDOperand();
3453}
3454
3455SDOperand
3456X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
3457  SDOperand V1 = Op.getOperand(0);
3458  SDOperand V2 = Op.getOperand(1);
3459  SDOperand PermMask = Op.getOperand(2);
3460  MVT::ValueType VT = Op.getValueType();
3461  unsigned NumElems = PermMask.getNumOperands();
3462  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
3463  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
3464  bool V1IsSplat = false;
3465  bool V2IsSplat = false;
3466
3467  if (isUndefShuffle(Op.Val))
3468    return DAG.getNode(ISD::UNDEF, VT);
3469
3470  if (isSplatMask(PermMask.Val)) {
3471    if (NumElems <= 4) return Op;
3472    // Promote it to a v4i32 splat.
3473    return PromoteSplat(Op, DAG);
3474  }
3475
3476  if (X86::isMOVLMask(PermMask.Val))
3477    return (V1IsUndef) ? V2 : Op;
3478
3479  if (X86::isMOVSHDUPMask(PermMask.Val) ||
3480      X86::isMOVSLDUPMask(PermMask.Val) ||
3481      X86::isMOVHLPSMask(PermMask.Val) ||
3482      X86::isMOVHPMask(PermMask.Val) ||
3483      X86::isMOVLPMask(PermMask.Val))
3484    return Op;
3485
3486  if (ShouldXformToMOVHLPS(PermMask.Val) ||
3487      ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val))
3488    return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3489
3490  bool Commuted = false;
3491  V1IsSplat = isSplatVector(V1.Val);
3492  V2IsSplat = isSplatVector(V2.Val);
3493  if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
3494    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3495    std::swap(V1IsSplat, V2IsSplat);
3496    std::swap(V1IsUndef, V2IsUndef);
3497    Commuted = true;
3498  }
3499
3500  if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) {
3501    if (V2IsUndef) return V1;
3502    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3503    if (V2IsSplat) {
      // V2 is a splat, so the mask may be malformed. That is, it may point
      // to any V2 element. The instruction selector won't like this. Get
      // a corrected mask and commute to form a proper MOVS{S|D}.
3507      SDOperand NewMask = getMOVLMask(NumElems, DAG);
3508      if (NewMask.Val != PermMask.Val)
3509        Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3510    }
3511    return Op;
3512  }
3513
3514  if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
3515      X86::isUNPCKLMask(PermMask.Val) ||
3516      X86::isUNPCKHMask(PermMask.Val))
3517    return Op;
3518
3519  if (V2IsSplat) {
    // Normalize the mask so all entries that point to V2 point to its first
    // element, then try to match unpck{h|l} again. If it matches, return a
    // new vector_shuffle with the corrected mask.
3523    SDOperand NewMask = NormalizeMask(PermMask, DAG);
3524    if (NewMask.Val != PermMask.Val) {
3525      if (X86::isUNPCKLMask(PermMask.Val, true)) {
3526        SDOperand NewMask = getUnpacklMask(NumElems, DAG);
3527        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3528      } else if (X86::isUNPCKHMask(PermMask.Val, true)) {
3529        SDOperand NewMask = getUnpackhMask(NumElems, DAG);
3530        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
3531      }
3532    }
3533  }
3534
  // Normalize the node to match x86 shuffle ops if needed.
  if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.Val))
    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3538
3539  if (Commuted) {
    // Commute it back and try unpck* again.
3541    Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
3542    if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
3543        X86::isUNPCKLMask(PermMask.Val) ||
3544        X86::isUNPCKHMask(PermMask.Val))
3545      return Op;
3546  }
3547
3548  // If VT is integer, try PSHUF* first, then SHUFP*.
3549  if (MVT::isInteger(VT)) {
3550    if (X86::isPSHUFDMask(PermMask.Val) ||
3551        X86::isPSHUFHWMask(PermMask.Val) ||
3552        X86::isPSHUFLWMask(PermMask.Val)) {
3553      if (V2.getOpcode() != ISD::UNDEF)
3554        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
3555                           DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
3556      return Op;
3557    }
3558
3559    if (X86::isSHUFPMask(PermMask.Val))
3560      return Op;
3561
3562    // Handle v8i16 shuffle high / low shuffle node pair.
3563    if (VT == MVT::v8i16 && isPSHUFHW_PSHUFLWMask(PermMask.Val)) {
3564      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
3565      MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
3566      std::vector<SDOperand> MaskVec;
3567      for (unsigned i = 0; i != 4; ++i)
3568        MaskVec.push_back(PermMask.getOperand(i));
3569      for (unsigned i = 4; i != 8; ++i)
3570        MaskVec.push_back(DAG.getConstant(i, BaseVT));
3571      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3572                                   &MaskVec[0], MaskVec.size());
3573      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
3574      MaskVec.clear();
3575      for (unsigned i = 0; i != 4; ++i)
3576        MaskVec.push_back(DAG.getConstant(i, BaseVT));
3577      for (unsigned i = 4; i != 8; ++i)
3578        MaskVec.push_back(PermMask.getOperand(i));
3579      Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0],MaskVec.size());
3580      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
3581    }
3582  } else {
3583    // Floating point cases in the other order.
3584    if (X86::isSHUFPMask(PermMask.Val))
3585      return Op;
3586    if (X86::isPSHUFDMask(PermMask.Val) ||
3587        X86::isPSHUFHWMask(PermMask.Val) ||
3588        X86::isPSHUFLWMask(PermMask.Val)) {
3589      if (V2.getOpcode() != ISD::UNDEF)
3590        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
3591                           DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
3592      return Op;
3593    }
3594  }
3595
3596  if (NumElems == 4) {
3597    MVT::ValueType MaskVT = PermMask.getValueType();
3598    MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
3599    std::vector<std::pair<int, int> > Locs;
    Locs.resize(NumElems);
3601    std::vector<SDOperand> Mask1(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
3602    std::vector<SDOperand> Mask2(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
3603    unsigned NumHi = 0;
3604    unsigned NumLo = 0;
    // If no more than two elements come from either vector, this can be
    // implemented with two shuffles. The first shuffle gathers the elements,
    // and the second shuffle, which takes the first shuffle as both of its
    // vector operands, puts the elements into the right order.
3609    for (unsigned i = 0; i != NumElems; ++i) {
3610      SDOperand Elt = PermMask.getOperand(i);
3611      if (Elt.getOpcode() == ISD::UNDEF) {
3612        Locs[i] = std::make_pair(-1, -1);
3613      } else {
3614        unsigned Val = cast<ConstantSDNode>(Elt)->getValue();
3615        if (Val < NumElems) {
3616          Locs[i] = std::make_pair(0, NumLo);
3617          Mask1[NumLo] = Elt;
3618          NumLo++;
3619        } else {
3620          Locs[i] = std::make_pair(1, NumHi);
3621          if (2+NumHi < NumElems)
3622            Mask1[2+NumHi] = Elt;
3623          NumHi++;
3624        }
3625      }
3626    }
3627    if (NumLo <= 2 && NumHi <= 2) {
3628      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
3629                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3630                                   &Mask1[0], Mask1.size()));
3631      for (unsigned i = 0; i != NumElems; ++i) {
3632        if (Locs[i].first == -1)
3633          continue;
3634        else {
3635          unsigned Idx = (i < NumElems/2) ? 0 : NumElems;
3636          Idx += Locs[i].first * (NumElems/2) + Locs[i].second;
3637          Mask2[i] = DAG.getConstant(Idx, MaskEVT);
3638        }
3639      }
3640
3641      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1,
3642                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3643                                     &Mask2[0], Mask2.size()));
3644    }
3645
3646    // Break it into (shuffle shuffle_hi, shuffle_lo).
    Locs.clear();
    Locs.resize(NumElems);
3648    std::vector<SDOperand> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
3649    std::vector<SDOperand> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
3650    std::vector<SDOperand> *MaskPtr = &LoMask;
3651    unsigned MaskIdx = 0;
3652    unsigned LoIdx = 0;
3653    unsigned HiIdx = NumElems/2;
3654    for (unsigned i = 0; i != NumElems; ++i) {
3655      if (i == NumElems/2) {
3656        MaskPtr = &HiMask;
3657        MaskIdx = 1;
3658        LoIdx = 0;
3659        HiIdx = NumElems/2;
3660      }
3661      SDOperand Elt = PermMask.getOperand(i);
3662      if (Elt.getOpcode() == ISD::UNDEF) {
3663        Locs[i] = std::make_pair(-1, -1);
3664      } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) {
3665        Locs[i] = std::make_pair(MaskIdx, LoIdx);
3666        (*MaskPtr)[LoIdx] = Elt;
3667        LoIdx++;
3668      } else {
3669        Locs[i] = std::make_pair(MaskIdx, HiIdx);
3670        (*MaskPtr)[HiIdx] = Elt;
3671        HiIdx++;
3672      }
3673    }
3674
3675    SDOperand LoShuffle =
3676      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
3677                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3678                              &LoMask[0], LoMask.size()));
3679    SDOperand HiShuffle =
3680      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
3681                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3682                              &HiMask[0], HiMask.size()));
3683    std::vector<SDOperand> MaskOps;
3684    for (unsigned i = 0; i != NumElems; ++i) {
3685      if (Locs[i].first == -1) {
3686        MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
3687      } else {
3688        unsigned Idx = Locs[i].first * NumElems + Locs[i].second;
3689        MaskOps.push_back(DAG.getConstant(Idx, MaskEVT));
3690      }
3691    }
3692    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle,
3693                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3694                                   &MaskOps[0], MaskOps.size()));
3695  }
3696
3697  return SDOperand();
3698}
3699
3700SDOperand
3701X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
3702  if (!isa<ConstantSDNode>(Op.getOperand(1)))
3703    return SDOperand();
3704
3705  MVT::ValueType VT = Op.getValueType();
3706  // TODO: handle v16i8.
3707  if (MVT::getSizeInBits(VT) == 16) {
    // Transform it so it matches pextrw, which produces a 32-bit result.
3709    MVT::ValueType EVT = (MVT::ValueType)(VT+1);
3710    SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
3711                                    Op.getOperand(0), Op.getOperand(1));
3712    SDOperand Assert  = DAG.getNode(ISD::AssertZext, EVT, Extract,
3713                                    DAG.getValueType(VT));
3714    return DAG.getNode(ISD::TRUNCATE, VT, Assert);
3715  } else if (MVT::getSizeInBits(VT) == 32) {
3716    SDOperand Vec = Op.getOperand(0);
3717    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
3718    if (Idx == 0)
3719      return Op;
3720    // SHUFPS the element to the lowest double word, then movss.
3721    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
3722    std::vector<SDOperand> IdxVec;
3723    IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorBaseType(MaskVT)));
3724    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
3725    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
3726    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
3727    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3728                                 &IdxVec[0], IdxVec.size());
3729    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
3730                      Vec, Vec, Mask);
3731    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
3732                       DAG.getConstant(0, getPointerTy()));
3733  } else if (MVT::getSizeInBits(VT) == 64) {
3734    SDOperand Vec = Op.getOperand(0);
3735    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
3736    if (Idx == 0)
3737      return Op;
3738
    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note that if the lower 64 bits of the result of the UNPCKHPD are then
    // stored to an f64mem operand, the whole operation is folded into a
    // single MOVHPDmr.
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(2);
3743    std::vector<SDOperand> IdxVec;
3744    IdxVec.push_back(DAG.getConstant(1, MVT::getVectorBaseType(MaskVT)));
3745    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
3746    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3747                                 &IdxVec[0], IdxVec.size());
3748    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
3749                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
3750    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
3751                       DAG.getConstant(0, getPointerTy()));
3752  }
3753
3754  return SDOperand();
3755}
3756
3757SDOperand
3758X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
  // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
  // as its second argument.
3761  MVT::ValueType VT = Op.getValueType();
3762  MVT::ValueType BaseVT = MVT::getVectorBaseType(VT);
3763  SDOperand N0 = Op.getOperand(0);
3764  SDOperand N1 = Op.getOperand(1);
3765  SDOperand N2 = Op.getOperand(2);
3766  if (MVT::getSizeInBits(BaseVT) == 16) {
3767    if (N1.getValueType() != MVT::i32)
3768      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
3769    if (N2.getValueType() != MVT::i32)
3770      N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(), MVT::i32);
3771    return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
3772  } else if (MVT::getSizeInBits(BaseVT) == 32) {
3773    unsigned Idx = cast<ConstantSDNode>(N2)->getValue();
3774    if (Idx == 0) {
3775      // Use a movss.
3776      N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1);
3777      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
3778      MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
3779      std::vector<SDOperand> MaskVec;
3780      MaskVec.push_back(DAG.getConstant(4, BaseVT));
3781      for (unsigned i = 1; i <= 3; ++i)
3782        MaskVec.push_back(DAG.getConstant(i, BaseVT));
3783      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, N0, N1,
3784                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
3785                                     &MaskVec[0], MaskVec.size()));
3786    } else {
      // Use two pinsrw instructions to insert a 32-bit value.
3788      Idx <<= 1;
3789      if (MVT::isFloatingPoint(N1.getValueType())) {
3790        if (ISD::isNON_EXTLoad(N1.Val)) {
3791          // Just load directly from f32mem to GR32.
3792          LoadSDNode *LD = cast<LoadSDNode>(N1);
3793          N1 = DAG.getLoad(MVT::i32, LD->getChain(), LD->getBasePtr(),
3794                           LD->getSrcValue(), LD->getSrcValueOffset());
3795        } else {
3796          N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4f32, N1);
3797          N1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, N1);
3798          N1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, N1,
3799                           DAG.getConstant(0, getPointerTy()));
3800        }
3801      }
3802      N0 = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, N0);
3803      N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
3804                       DAG.getConstant(Idx, getPointerTy()));
3805      N1 = DAG.getNode(ISD::SRL, MVT::i32, N1, DAG.getConstant(16, MVT::i8));
3806      N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
3807                       DAG.getConstant(Idx+1, getPointerTy()));
3808      return DAG.getNode(ISD::BIT_CONVERT, VT, N0);
3809    }
3810  }
3811
3812  return SDOperand();
3813}
3814
3815SDOperand
3816X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
3817  SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
3818  return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt);
3819}
3820
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form an addressing mode. These wrapped nodes will be selected
// into MOV32ri.
3827SDOperand
3828X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
3829  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
3830  SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
3831                                 DAG.getTargetConstantPool(CP->getConstVal(),
3832                                                           getPointerTy(),
3833                                                           CP->getAlignment()));
3834  if (Subtarget->isTargetDarwin()) {
3835    // With PIC, the address is actually $g + Offset.
3836    if (!Subtarget->is64Bit() &&
3837        getTargetMachine().getRelocationModel() == Reloc::PIC_)
3838      Result = DAG.getNode(ISD::ADD, getPointerTy(),
3839                    DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result);
3840  }
3841
3842  return Result;
3843}
3844
3845SDOperand
3846X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
3847  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3848  SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
3849                                 DAG.getTargetGlobalAddress(GV,
3850                                                            getPointerTy()));
3851  if (Subtarget->isTargetDarwin()) {
3852    // With PIC, the address is actually $g + Offset.
3853    if (!Subtarget->is64Bit() &&
3854        getTargetMachine().getRelocationModel() == Reloc::PIC_)
3855      Result = DAG.getNode(ISD::ADD, getPointerTy(),
3856                           DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
3857                           Result);
3858
3859    // For Darwin, external and weak symbols are indirect, so we want to load
3860    // the value at address GV, not the value of GV itself. This means that
3861    // the GlobalAddress must be in the base or index register of the address,
3862    // not the GV offset field.
3863    if (getTargetMachine().getRelocationModel() != Reloc::Static &&
3864        DarwinGVRequiresExtraLoad(GV))
3865      Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, NULL, 0);
3866  } else if (Subtarget->isTargetCygwin() || Subtarget->isTargetWindows()) {
3867    // FIXME: What about PIC?
3868    if (WindowsGVRequiresExtraLoad(GV))
3869      Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result, NULL, 0);
3870  }

  return Result;
3874}
3875
3876SDOperand
3877X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) {
3878  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
3879  SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
3880                                 DAG.getTargetExternalSymbol(Sym,
3881                                                             getPointerTy()));
3882  if (Subtarget->isTargetDarwin()) {
3883    // With PIC, the address is actually $g + Offset.
3884    if (!Subtarget->is64Bit() &&
3885        getTargetMachine().getRelocationModel() == Reloc::PIC_)
3886      Result = DAG.getNode(ISD::ADD, getPointerTy(),
3887                           DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
3888                           Result);
3889  }
3890
3891  return Result;
3892}
3893
3894SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
         "Not an i64 shift!");
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDOperand ShOpLo = Op.getOperand(0);
  SDOperand ShOpHi = Op.getOperand(1);
  SDOperand ShAmt  = Op.getOperand(2);
  SDOperand Tmp1 = isSRA ?
    DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) :
    DAG.getConstant(0, MVT::i32);

  SDOperand Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt);
  } else {
    Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt);
    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt);
  }

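  // For shift amounts in [32, 64) the double-shift result is dead: the other
  // half is just a plain shift (or the sign fill for SRA). Test bit 5 of the
  // shift amount and select the correct pair of results with CMOVs.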
  const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
  SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt,
                                  DAG.getConstant(32, MVT::i8));
  SDOperand COps[] = { DAG.getEntryNode(), AndNode,
                       DAG.getConstant(0, MVT::i8) };
  SDOperand InFlag = DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1);

  SDOperand Hi, Lo;
  SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8);

  VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag);
  SmallVector<SDOperand, 4> Ops;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Ops.push_back(Tmp2);
    Ops.push_back(Tmp3);
    Ops.push_back(CC);
    Ops.push_back(InFlag);
    Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
    InFlag = Hi.getValue(1);

    Ops.clear();
    Ops.push_back(Tmp3);
    Ops.push_back(Tmp1);
    Ops.push_back(CC);
    Ops.push_back(InFlag);
    Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
  } else {
    Ops.push_back(Tmp2);
    Ops.push_back(Tmp3);
    Ops.push_back(CC);
    Ops.push_back(InFlag);
    Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
    InFlag = Lo.getValue(1);

    Ops.clear();
    Ops.push_back(Tmp3);
    Ops.push_back(Tmp1);
    Ops.push_back(CC);
    Ops.push_back(InFlag);
    Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
  }

  VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32);
  Ops.clear();
  Ops.push_back(Lo);
  Ops.push_back(Hi);
  return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size());
3960}
3961
3962SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
3963  assert(Op.getOperand(0).getValueType() <= MVT::i64 &&
3964         Op.getOperand(0).getValueType() >= MVT::i16 &&
3965         "Unknown SINT_TO_FP to lower!");
3966
3967  SDOperand Result;
3968  MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
3969  unsigned Size = MVT::getSizeInBits(SrcVT)/8;
3970  MachineFunction &MF = DAG.getMachineFunction();
3971  int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
3972  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
3973  SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0),
3974                                 StackSlot, NULL, 0);
3975
3976  // Build the FILD
3977  std::vector<MVT::ValueType> Tys;
3978  Tys.push_back(MVT::f64);
3979  Tys.push_back(MVT::Other);
3980  if (X86ScalarSSE) Tys.push_back(MVT::Flag);
3981  std::vector<SDOperand> Ops;
3982  Ops.push_back(Chain);
3983  Ops.push_back(StackSlot);
3984  Ops.push_back(DAG.getValueType(SrcVT));
3985  Result = DAG.getNode(X86ScalarSSE ? X86ISD::FILD_FLAG :X86ISD::FILD,
3986                       Tys, &Ops[0], Ops.size());
3987
3988  if (X86ScalarSSE) {
3989    Chain = Result.getValue(1);
3990    SDOperand InFlag = Result.getValue(2);
3991
    // FIXME: Currently the FST is flagged to the FILD_FLAG. This
    // shouldn't be necessary except that RFP cannot be live across
    // multiple blocks. When the stackifier is fixed, they can be uncoupled.
3995    MachineFunction &MF = DAG.getMachineFunction();
3996    int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
3997    SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
3998    std::vector<MVT::ValueType> Tys;
3999    Tys.push_back(MVT::Other);
4000    std::vector<SDOperand> Ops;
4001    Ops.push_back(Chain);
4002    Ops.push_back(Result);
4003    Ops.push_back(StackSlot);
4004    Ops.push_back(DAG.getValueType(Op.getValueType()));
4005    Ops.push_back(InFlag);
4006    Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
4007    Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot, NULL, 0);
4008  }
4009
4010  return Result;
4011}
4012
4013SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
4014  assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
4015         "Unknown FP_TO_SINT to lower!");
  // We lower FP->sint16/32/64 into FIST*, followed by a load from a
  // temporary stack slot.
4018  MachineFunction &MF = DAG.getMachineFunction();
4019  unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
4020  int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
4021  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
4022
4023  unsigned Opc;
4024  switch (Op.getValueType()) {
4025    default: assert(0 && "Invalid FP_TO_SINT to lower!");
4026    case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
4027    case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
4028    case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
4029  }
4030
4031  SDOperand Chain = DAG.getEntryNode();
4032  SDOperand Value = Op.getOperand(0);
4033  if (X86ScalarSSE) {
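    // The value is in an XMM register. Spill it and reload it onto the x87
    // stack, since only the FIST family can store the integer result.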
4034    assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
4035    Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0);
4036    std::vector<MVT::ValueType> Tys;
4037    Tys.push_back(MVT::f64);
4038    Tys.push_back(MVT::Other);
4039    std::vector<SDOperand> Ops;
4040    Ops.push_back(Chain);
4041    Ops.push_back(StackSlot);
4042    Ops.push_back(DAG.getValueType(Op.getOperand(0).getValueType()));
4043    Value = DAG.getNode(X86ISD::FLD, Tys, &Ops[0], Ops.size());
4044    Chain = Value.getValue(1);
4045    SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
4046    StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
4047  }
4048
4049  // Build the FP_TO_INT*_IN_MEM
4050  std::vector<SDOperand> Ops;
4051  Ops.push_back(Chain);
4052  Ops.push_back(Value);
4053  Ops.push_back(StackSlot);
4054  SDOperand FIST = DAG.getNode(Opc, MVT::Other, &Ops[0], Ops.size());
4055
4056  // Load the result.
4057  return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0);
4058}
4059
4060SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
4061  MVT::ValueType VT = Op.getValueType();
  const Type *OpNTy = MVT::getTypeForValueType(VT);
4063  std::vector<Constant*> CV;
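  // Build a constant-pool value whose first element has every bit set except
  // the sign bit; FAND with it clears the sign of the operand.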
4064  if (VT == MVT::f64) {
4065    CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63))));
4066    CV.push_back(ConstantFP::get(OpNTy, 0.0));
4067  } else {
4068    CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31))));
4069    CV.push_back(ConstantFP::get(OpNTy, 0.0));
4070    CV.push_back(ConstantFP::get(OpNTy, 0.0));
4071    CV.push_back(ConstantFP::get(OpNTy, 0.0));
4072  }
4073  Constant *CS = ConstantStruct::get(CV);
4074  SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
4075  std::vector<MVT::ValueType> Tys;
4076  Tys.push_back(VT);
4077  Tys.push_back(MVT::Other);
4078  SmallVector<SDOperand, 3> Ops;
4079  Ops.push_back(DAG.getEntryNode());
4080  Ops.push_back(CPIdx);
4081  Ops.push_back(DAG.getSrcValue(NULL));
4082  SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size());
4083  return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask);
4084}
4085
4086SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
4087  MVT::ValueType VT = Op.getValueType();
  const Type *OpNTy = MVT::getTypeForValueType(VT);
4089  std::vector<Constant*> CV;
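  // Build a constant-pool value whose first element has only the sign bit
  // set; FXOR with it flips the sign of the operand.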
4090  if (VT == MVT::f64) {
4091    CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63)));
4092    CV.push_back(ConstantFP::get(OpNTy, 0.0));
4093  } else {
4094    CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(1U << 31)));
4095    CV.push_back(ConstantFP::get(OpNTy, 0.0));
4096    CV.push_back(ConstantFP::get(OpNTy, 0.0));
4097    CV.push_back(ConstantFP::get(OpNTy, 0.0));
4098  }
4099  Constant *CS = ConstantStruct::get(CV);
4100  SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
4101  std::vector<MVT::ValueType> Tys;
4102  Tys.push_back(VT);
4103  Tys.push_back(MVT::Other);
4104  SmallVector<SDOperand, 3> Ops;
4105  Ops.push_back(DAG.getEntryNode());
4106  Ops.push_back(CPIdx);
4107  Ops.push_back(DAG.getSrcValue(NULL));
4108  SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size());
4109  return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask);
4110}
4111
4112SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG,
4113                                        SDOperand Chain) {
4114  assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
4115  SDOperand Cond;
4116  SDOperand Op0 = Op.getOperand(0);
4117  SDOperand Op1 = Op.getOperand(1);
4118  SDOperand CC = Op.getOperand(2);
4119  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
4120  const MVT::ValueType *VTs1 = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
4121  const MVT::ValueType *VTs2 = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
4122  bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
4123  unsigned X86CC;
4124
4125  if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC,
4126                     Op0, Op1, DAG)) {
4127    SDOperand Ops1[] = { Chain, Op0, Op1 };
4128    Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, Ops1, 3).getValue(1);
4129    SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
4130    return DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2);
4131  }
4132
4133  assert(isFP && "Illegal integer SetCC!");
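  // Neither SETOEQ nor SETUNE maps to a single x86 condition code after an
  // FP compare; each needs two flag tests combined with AND or OR.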
4134
4135  SDOperand COps[] = { Chain, Op0, Op1 };
4136  Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, COps, 3).getValue(1);
4137
4138  switch (SetCCOpcode) {
4139  default: assert(false && "Illegal floating point SetCC!");
4140  case ISD::SETOEQ: {  // !PF & ZF
4141    SDOperand Ops1[] = { DAG.getConstant(X86::COND_NP, MVT::i8), Cond };
4142    SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2);
4143    SDOperand Ops2[] = { DAG.getConstant(X86::COND_E, MVT::i8),
4144                         Tmp1.getValue(1) };
4145    SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2);
4146    return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
4147  }
4148  case ISD::SETUNE: {  // PF | !ZF
4149    SDOperand Ops1[] = { DAG.getConstant(X86::COND_P, MVT::i8), Cond };
4150    SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2);
4151    SDOperand Ops2[] = { DAG.getConstant(X86::COND_NE, MVT::i8),
4152                         Tmp1.getValue(1) };
4153    SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2);
4154    return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
4155  }
4156  }
4157}
4158
4159SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) {
4160  bool addTest = true;
4161  SDOperand Chain = DAG.getEntryNode();
4162  SDOperand Cond  = Op.getOperand(0);
4163  SDOperand CC;
4164  const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
4165
4166  if (Cond.getOpcode() == ISD::SETCC)
4167    Cond = LowerSETCC(Cond, DAG, Chain);
4168
4169  if (Cond.getOpcode() == X86ISD::SETCC) {
4170    CC = Cond.getOperand(0);
4171
4172    // If the condition flag is set by an X86ISD::CMP, make a copy of it
4173    // (since the flag operand cannot be shared) and use the copy as the
4174    // condition-setting operand in place of the X86ISD::SETCC.
4175    // If the X86ISD::SETCC has more than one use, it might be better to
4176    // use a test instead of duplicating the X86ISD::CMP (for register
4177    // pressure reasons)?
4178    SDOperand Cmp = Cond.getOperand(1);
4179    unsigned Opc = Cmp.getOpcode();
4180    bool IllegalFPCMov = !X86ScalarSSE &&
4181      MVT::isFloatingPoint(Op.getValueType()) &&
4182      !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
4183    if ((Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) &&
4184        !IllegalFPCMov) {
4185      SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) };
4186      Cond = DAG.getNode(Opc, VTs, 2, Ops, 3);
4187      addTest = false;
4188    }
4189  }
4190
4191  if (addTest) {
4192    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
4193    SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) };
4194    Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3);
4195  }
4196
4197  VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::Flag);
4198  SmallVector<SDOperand, 4> Ops;
4199  // X86ISD::CMOV takes the false value first and the true value second: the
4200  // result defaults to the former and is replaced when the condition holds.
4201  Ops.push_back(Op.getOperand(2));
4202  Ops.push_back(Op.getOperand(1));
4203  Ops.push_back(CC);
4204  Ops.push_back(Cond.getValue(1));
4205  return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
4206}
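// Illustrative only: "r = c ? x : y" typically selects to something like
//
//     testb   %cl, %cl    ; the addTest path: CMP(c, 0) with COND_NE
//     movl    y, %eax     ; result defaults to the false value
//     cmovne  x, %eax     ; overwritten only if the condition held
//
// (register choices are made up for the example).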
4207
4208SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
4209  bool addTest = true;
4210  SDOperand Chain = Op.getOperand(0);
4211  SDOperand Cond  = Op.getOperand(1);
4212  SDOperand Dest  = Op.getOperand(2);
4213  SDOperand CC;
4214  const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
4215
4216  if (Cond.getOpcode() == ISD::SETCC)
4217    Cond = LowerSETCC(Cond, DAG, Chain);
4218
4219  if (Cond.getOpcode() == X86ISD::SETCC) {
4220    CC = Cond.getOperand(0);
4221
4222    // If the condition flag is set by an X86ISD::CMP, make a copy of it
4223    // (since the flag operand cannot be shared) and use the copy as the
4224    // condition-setting operand in place of the X86ISD::SETCC.
4225    // If the X86ISD::SETCC has more than one use, it might be better to
4226    // use a test instead of duplicating the X86ISD::CMP (for register
4227    // pressure reasons)?
4228    SDOperand Cmp = Cond.getOperand(1);
4229    unsigned Opc = Cmp.getOpcode();
4230    if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) {
4231      SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) };
4232      Cond = DAG.getNode(Opc, VTs, 2, Ops, 3);
4233      addTest = false;
4234    }
4235  }
4236
4237  if (addTest) {
4238    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
4239    SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) };
4240    Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3);
4241  }
4242  return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
4243                     Cond, Dest, CC, Cond.getValue(1));
4244}
4245
4246SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
4247  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
4248  SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
4249                                 DAG.getTargetJumpTable(JT->getIndex(),
4250                                                        getPointerTy()));
4251  if (Subtarget->isTargetDarwin()) {
4252    // With PIC, the address is actually $g + Offset.
4253    if (!Subtarget->is64Bit() &&
4254        getTargetMachine().getRelocationModel() == Reloc::PIC_)
4255      Result = DAG.getNode(ISD::ADD, getPointerTy(),
4256                           DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
4257                           Result);
4258  }
4259
4260  return Result;
4261}
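// Illustrative only: in the Darwin PIC case the address built above is
//
//     ADD(X86ISD::GlobalBaseReg, X86ISD::Wrapper(TargetJumpTable(JT)))
//
// which materializes as "picbase + LJTIn" in the emitted assembly.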
4262
4263SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
4264  unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();
4265
4266  if (Subtarget->is64Bit())
4267    return LowerX86_64CCCCallTo(Op, DAG);
4268  else
4269    switch (CallingConv) {
4270    default:
4271      assert(0 && "Unsupported calling convention");
4272    case CallingConv::Fast:
4273      if (EnableFastCC) {
4274        return LowerFastCCCallTo(Op, DAG, false);
4275      }
4276      // Falls through
4277    case CallingConv::C:
4278    case CallingConv::CSRet:
4279      return LowerCCCCallTo(Op, DAG);
4280    case CallingConv::X86_StdCall:
4281      return LowerStdCallCCCallTo(Op, DAG);
4282    case CallingConv::X86_FastCall:
4283      return LowerFastCCCallTo(Op, DAG, true);
4284    }
4285}
4286
4287SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
4288  SDOperand Copy;
4289
4290  switch (Op.getNumOperands()) {
4291    default:
4292      assert(0 && "Do not know how to return this many arguments!");
4293      abort();
4294    case 1:    // ret void.
4295      return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Op.getOperand(0),
4296                        DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
4297    case 3: {
4298      MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
4299
4300      if (MVT::isVector(ArgVT) ||
4301          (Subtarget->is64Bit() && MVT::isFloatingPoint(ArgVT))) {
4302        // Vector result (or, on X86-64, any FP result) -> XMM0.
4303        if (DAG.getMachineFunction().liveout_empty())
4304          DAG.getMachineFunction().addLiveOut(X86::XMM0);
4305        Copy = DAG.getCopyToReg(Op.getOperand(0), X86::XMM0, Op.getOperand(1),
4306                                SDOperand());
4307      } else if (MVT::isInteger(ArgVT)) {
4308        // Integer result -> EAX / RAX.
4309        // The C calling convention guarantees the return value has been
4310        // promoted to at least MVT::i32. The X86-64 ABI doesn't require the
4311        // value to be promoted to MVT::i64, so we don't have to extend it to
4312        // 64 bits. Return the value in EAX, but mark RAX as live-out.
4313        unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
4314        if (DAG.getMachineFunction().liveout_empty())
4315          DAG.getMachineFunction().addLiveOut(Reg);
4316
4317        Reg = (ArgVT == MVT::i64) ? X86::RAX : X86::EAX;
4318        Copy = DAG.getCopyToReg(Op.getOperand(0), Reg, Op.getOperand(1),
4319                                SDOperand());
4320      } else if (!X86ScalarSSE) {
4321        // FP return with fp-stack value.
4322        if (DAG.getMachineFunction().liveout_empty())
4323          DAG.getMachineFunction().addLiveOut(X86::ST0);
4324
4325        std::vector<MVT::ValueType> Tys;
4326        Tys.push_back(MVT::Other);
4327        Tys.push_back(MVT::Flag);
4328        std::vector<SDOperand> Ops;
4329        Ops.push_back(Op.getOperand(0));
4330        Ops.push_back(Op.getOperand(1));
4331        Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, &Ops[0], Ops.size());
4332      } else {
4333        // FP return with ScalarSSE (return on fp-stack).
4334        if (DAG.getMachineFunction().liveout_empty())
4335          DAG.getMachineFunction().addLiveOut(X86::ST0);
4336
4337        SDOperand MemLoc;
4338        SDOperand Chain = Op.getOperand(0);
4339        SDOperand Value = Op.getOperand(1);
4340
4341        if (ISD::isNON_EXTLoad(Value.Val) &&
4342            (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
4343          Chain  = Value.getOperand(0);
4344          MemLoc = Value.getOperand(1);
4345        } else {
4346          // Spill the value to memory and reload it into top of stack.
4347          unsigned Size = MVT::getSizeInBits(ArgVT)/8;
4348          MachineFunction &MF = DAG.getMachineFunction();
4349          int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
4350          MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
4351          Chain = DAG.getStore(Op.getOperand(0), Value, MemLoc, NULL, 0);
4352        }
4353        std::vector<MVT::ValueType> Tys;
4354        Tys.push_back(MVT::f64);
4355        Tys.push_back(MVT::Other);
4356        std::vector<SDOperand> Ops;
4357        Ops.push_back(Chain);
4358        Ops.push_back(MemLoc);
4359        Ops.push_back(DAG.getValueType(ArgVT));
4360        Copy = DAG.getNode(X86ISD::FLD, Tys, &Ops[0], Ops.size());
4361        Tys.clear();
4362        Tys.push_back(MVT::Other);
4363        Tys.push_back(MVT::Flag);
4364        Ops.clear();
4365        Ops.push_back(Copy.getValue(1));
4366        Ops.push_back(Copy);
4367        Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, &Ops[0], Ops.size());
4368      }
4369      break;
4370    }
4371    case 5: {
4372      unsigned Reg1 = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
4373      unsigned Reg2 = Subtarget->is64Bit() ? X86::RDX : X86::EDX;
4374      if (DAG.getMachineFunction().liveout_empty()) {
4375        DAG.getMachineFunction().addLiveOut(Reg1);
4376        DAG.getMachineFunction().addLiveOut(Reg2);
4377      }
4378
4379      Copy = DAG.getCopyToReg(Op.getOperand(0), Reg2, Op.getOperand(3),
4380                              SDOperand());
4381      Copy = DAG.getCopyToReg(Copy, Reg1, Op.getOperand(1), Copy.getValue(1));
4382      break;
4383    }
4384  }
4385  return DAG.getNode(X86ISD::RET_FLAG, MVT::Other,
4386                     Copy, DAG.getConstant(getBytesToPopOnReturn(), MVT::i16),
4387                     Copy.getValue(1));
4388}
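// Illustrative only: for an SSE f64 return value, the C ABI still expects
// the result in ST(0), so the X86ISD::FLD path above reuses the memory an
// existing load read from, or spills, e.g. (stack offset made up):
//
//     movsd   %xmm0, -8(%ebp)
//     fldl    -8(%ebp)        ; now in ST(0); FP_SET_RESULT, then ret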
4389
4390SDOperand
4391X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
4392  MachineFunction &MF = DAG.getMachineFunction();
4393  const Function* Fn = MF.getFunction();
4394  if (Fn->hasExternalLinkage() &&
4395      Subtarget->isTargetCygwin() &&
4396      Fn->getName() == "main")
4397    MF.getInfo<X86FunctionInfo>()->setForceFramePointer(true);
4398
4399  unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
4400  if (Subtarget->is64Bit())
4401    return LowerX86_64CCCArguments(Op, DAG);
4402  else
4403    switch (CC) {
4404    default:
4405      assert(0 && "Unsupported calling convention");
4406    case CallingConv::Fast:
4407      if (EnableFastCC) {
4408        return LowerFastCCArguments(Op, DAG);
4409      }
4410      // Falls through
4411    case CallingConv::C:
4412    case CallingConv::CSRet:
4413      return LowerCCCArguments(Op, DAG);
4414    case CallingConv::X86_StdCall:
4415      MF.getInfo<X86FunctionInfo>()->setDecorationStyle(StdCall);
4416      return LowerStdCallCCArguments(Op, DAG);
4417    case CallingConv::X86_FastCall:
4418      MF.getInfo<X86FunctionInfo>()->setDecorationStyle(FastCall);
4419      return LowerFastCallCCArguments(Op, DAG);
4420    }
4421}
4422
4423SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) {
4424  SDOperand InFlag(0, 0);
4425  SDOperand Chain = Op.getOperand(0);
4426  unsigned Align =
4427    (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
4428  if (Align == 0) Align = 1;
4429
4430  ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4431  // If not DWORD aligned, call the memset library function when the size
4432  // is below the threshold; it knows how to align to the right boundary.
4433  if ((Align & 3) != 0 ||
4434      (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
4435    MVT::ValueType IntPtr = getPointerTy();
4436    const Type *IntPtrTy = getTargetData()->getIntPtrType();
4437    std::vector<std::pair<SDOperand, const Type*> > Args;
4438    Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
4439    // Extend the ubyte argument to be an int value for the call.
4440    SDOperand Val = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
4441    Args.push_back(std::make_pair(Val, IntPtrTy));
4442    Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy));
4443    std::pair<SDOperand,SDOperand> CallResult =
4444      LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false,
4445                  DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
4446    return CallResult.second;
4447  }
4448
4449  MVT::ValueType AVT;
4450  SDOperand Count;
4451  ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4452  unsigned BytesLeft = 0;
4453  bool TwoRepStos = false;
4454  if (ValC) {
4455    unsigned ValReg;
4456    uint64_t Val = ValC->getValue() & 255;
4457
4458    // If the value is a constant, then we can potentially use larger sets.
4459    switch (Align & 3) {
4460      case 2:   // WORD aligned
4461        AVT = MVT::i16;
4462        ValReg = X86::AX;
4463        Val = (Val << 8) | Val;
4464        break;
4465      case 0:  // DWORD aligned
4466        AVT = MVT::i32;
4467        ValReg = X86::EAX;
4468        Val = (Val << 8)  | Val;
4469        Val = (Val << 16) | Val;
4470        if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) {  // QWORD aligned
4471          AVT = MVT::i64;
4472          ValReg = X86::RAX;
4473          Val = (Val << 32) | Val;
4474        }
4475        break;
4476      default:  // Byte aligned
4477        AVT = MVT::i8;
4478        ValReg = X86::AL;
4479        Count = Op.getOperand(3);
4480        break;
4481    }
4482
4483    if (AVT > MVT::i8) {
4484      if (I) {
4485        unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
4486        Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy());
4487        BytesLeft = I->getValue() % UBytes;
4488      } else {
4489        assert(AVT >= MVT::i32 &&
4490               "Do not use rep;stos if not at least DWORD aligned");
4491        Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
4492                            Op.getOperand(3), DAG.getConstant(AVT == MVT::i64 ? 3 : 2, MVT::i8));
4493        TwoRepStos = true;
4494      }
4495    }
4496
4497    Chain  = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
4498                              InFlag);
4499    InFlag = Chain.getValue(1);
4500  } else {
4501    AVT = MVT::i8;
4502    Count  = Op.getOperand(3);
4503    Chain  = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
4504    InFlag = Chain.getValue(1);
4505  }
4506
4507  Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
4508                            Count, InFlag);
4509  InFlag = Chain.getValue(1);
4510  Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
4511                            Op.getOperand(1), InFlag);
4512  InFlag = Chain.getValue(1);
4513
4514  std::vector<MVT::ValueType> Tys;
4515  Tys.push_back(MVT::Other);
4516  Tys.push_back(MVT::Flag);
4517  std::vector<SDOperand> Ops;
4518  Ops.push_back(Chain);
4519  Ops.push_back(DAG.getValueType(AVT));
4520  Ops.push_back(InFlag);
4521  Chain  = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
4522
4523  if (TwoRepStos) {
4524    InFlag = Chain.getValue(1);
4525    Count = Op.getOperand(3);
4526    MVT::ValueType CVT = Count.getValueType();
4527    SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
4528                               DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
4529    Chain  = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX,
4530                              Left, InFlag);
4531    InFlag = Chain.getValue(1);
4532    Tys.clear();
4533    Tys.push_back(MVT::Other);
4534    Tys.push_back(MVT::Flag);
4535    Ops.clear();
4536    Ops.push_back(Chain);
4537    Ops.push_back(DAG.getValueType(MVT::i8));
4538    Ops.push_back(InFlag);
4539    Chain  = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
4540  } else if (BytesLeft) {
4541    // Issue stores for the last 1 - 7 bytes.
4542    SDOperand Value;
4543    unsigned Val = ValC->getValue() & 255;
4544    unsigned Offset = I->getValue() - BytesLeft;
4545    SDOperand DstAddr = Op.getOperand(1);
4546    MVT::ValueType AddrVT = DstAddr.getValueType();
4547    if (BytesLeft >= 4) {
4548      Val = (Val << 8)  | Val;
4549      Val = (Val << 16) | Val;
4550      Value = DAG.getConstant(Val, MVT::i32);
4551      Chain = DAG.getStore(Chain, Value,
4552                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
4553                                       DAG.getConstant(Offset, AddrVT)),
4554                           NULL, 0);
4555      BytesLeft -= 4;
4556      Offset += 4;
4557    }
4558    if (BytesLeft >= 2) {
4559      Value = DAG.getConstant((Val << 8) | Val, MVT::i16);
4560      Chain = DAG.getStore(Chain, Value,
4561                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
4562                                       DAG.getConstant(Offset, AddrVT)),
4563                           NULL, 0);
4564      BytesLeft -= 2;
4565      Offset += 2;
4566    }
4567    if (BytesLeft == 1) {
4568      Value = DAG.getConstant(Val, MVT::i8);
4569      Chain = DAG.getStore(Chain, Value,
4570                           DAG.getNode(ISD::ADD, AddrVT, DstAddr,
4571                                       DAG.getConstant(Offset, AddrVT)),
4572                           NULL, 0);
4573    }
4574  }
4575
4576  return Chain;
4577}
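// Illustrative only: memset(p, 0xAB, 103) with a DWORD-aligned p becomes
// roughly
//
//     movl    $0xABABABAB, %eax   ; the byte replicated into all 4 lanes
//     movl    $25, %ecx           ; 103 / 4 dwords
//     movl    p, %edi
//     rep; stosl
//
// followed by one 2-byte and one 1-byte store for the 3 trailing bytes.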
4578
4579SDOperand X86TargetLowering::LowerMEMCPY(SDOperand Op, SelectionDAG &DAG) {
4580  SDOperand Chain = Op.getOperand(0);
4581  unsigned Align =
4582    (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
4583  if (Align == 0) Align = 1;
4584
4585  ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
4586  // If not DWORD aligned, call the memcpy library function when the size
4587  // is below the threshold; it knows how to align to the right boundary.
4588  if ((Align & 3) != 0 ||
4589      (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
4590    MVT::ValueType IntPtr = getPointerTy();
4591    const Type *IntPtrTy = getTargetData()->getIntPtrType();
4592    std::vector<std::pair<SDOperand, const Type*> > Args;
4593    Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
4594    Args.push_back(std::make_pair(Op.getOperand(2), IntPtrTy));
4595    Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy));
4596    std::pair<SDOperand,SDOperand> CallResult =
4597      LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false,
4598                  DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG);
4599    return CallResult.second;
4600  }
4601
4602  MVT::ValueType AVT;
4603  SDOperand Count;
4604  unsigned BytesLeft = 0;
4605  bool TwoRepMovs = false;
4606  switch (Align & 3) {
4607    case 2:   // WORD aligned
4608      AVT = MVT::i16;
4609      break;
4610    case 0:  // DWORD aligned
4611      AVT = MVT::i32;
4612      if (Subtarget->is64Bit() && ((Align & 0xF) == 0))  // QWORD aligned
4613        AVT = MVT::i64;
4614      break;
4615    default:  // Byte aligned
4616      AVT = MVT::i8;
4617      Count = Op.getOperand(3);
4618      break;
4619  }
4620
4621  if (AVT > MVT::i8) {
4622    if (I) {
4623      unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
4624      Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy());
4625      BytesLeft = I->getValue() % UBytes;
4626    } else {
4627      assert(AVT >= MVT::i32 &&
4628             "Do not use rep;movs if not at least DWORD aligned");
4629      Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
4630                          Op.getOperand(3), DAG.getConstant(AVT == MVT::i64 ? 3 : 2, MVT::i8));
4631      TwoRepMovs = true;
4632    }
4633  }
4634
4635  SDOperand InFlag(0, 0);
4636  Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
4637                            Count, InFlag);
4638  InFlag = Chain.getValue(1);
4639  Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
4640                            Op.getOperand(1), InFlag);
4641  InFlag = Chain.getValue(1);
4642  Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI,
4643                            Op.getOperand(2), InFlag);
4644  InFlag = Chain.getValue(1);
4645
4646  std::vector<MVT::ValueType> Tys;
4647  Tys.push_back(MVT::Other);
4648  Tys.push_back(MVT::Flag);
4649  std::vector<SDOperand> Ops;
4650  Ops.push_back(Chain);
4651  Ops.push_back(DAG.getValueType(AVT));
4652  Ops.push_back(InFlag);
4653  Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size());
4654
4655  if (TwoRepMovs) {
4656    InFlag = Chain.getValue(1);
4657    Count = Op.getOperand(3);
4658    MVT::ValueType CVT = Count.getValueType();
4659    SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
4660                               DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
4661    Chain  = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX,
4662                              Left, InFlag);
4663    InFlag = Chain.getValue(1);
4664    Tys.clear();
4665    Tys.push_back(MVT::Other);
4666    Tys.push_back(MVT::Flag);
4667    Ops.clear();
4668    Ops.push_back(Chain);
4669    Ops.push_back(DAG.getValueType(MVT::i8));
4670    Ops.push_back(InFlag);
4671    Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size());
4672  } else if (BytesLeft) {
4673    // Issue loads and stores for the last 1 - 7 bytes.
4674    unsigned Offset = I->getValue() - BytesLeft;
4675    SDOperand DstAddr = Op.getOperand(1);
4676    MVT::ValueType DstVT = DstAddr.getValueType();
4677    SDOperand SrcAddr = Op.getOperand(2);
4678    MVT::ValueType SrcVT = SrcAddr.getValueType();
4679    SDOperand Value;
4680    if (BytesLeft >= 4) {
4681      Value = DAG.getLoad(MVT::i32, Chain,
4682                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
4683                                      DAG.getConstant(Offset, SrcVT)),
4684                          NULL, 0);
4685      Chain = Value.getValue(1);
4686      Chain = DAG.getStore(Chain, Value,
4687                           DAG.getNode(ISD::ADD, DstVT, DstAddr,
4688                                       DAG.getConstant(Offset, DstVT)),
4689                           NULL, 0);
4690      BytesLeft -= 4;
4691      Offset += 4;
4692    }
4693    if (BytesLeft >= 2) {
4694      Value = DAG.getLoad(MVT::i16, Chain,
4695                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
4696                                      DAG.getConstant(Offset, SrcVT)),
4697                          NULL, 0);
4698      Chain = Value.getValue(1);
4699      Chain = DAG.getStore(Chain, Value,
4700                           DAG.getNode(ISD::ADD, DstVT, DstAddr,
4701                                       DAG.getConstant(Offset, DstVT)),
4702                           NULL, 0);
4703      BytesLeft -= 2;
4704      Offset += 2;
4705    }
4706
4707    if (BytesLeft == 1) {
4708      Value = DAG.getLoad(MVT::i8, Chain,
4709                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
4710                                      DAG.getConstant(Offset, SrcVT)),
4711                          NULL, 0);
4712      Chain = Value.getValue(1);
4713      Chain = DAG.getStore(Chain, Value,
4714                           DAG.getNode(ISD::ADD, DstVT, DstAddr,
4715                                       DAG.getConstant(Offset, DstVT)),
4716                           NULL, 0);
4717    }
4718  }
4719
4720  return Chain;
4721}
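// Illustrative only: memcpy(d, s, 103) with DWORD-aligned operands becomes
// roughly
//
//     movl    $25, %ecx           ; 103 / 4 dwords
//     movl    d, %edi
//     movl    s, %esi
//     rep; movsl
//
// followed by a 2-byte and a 1-byte load/store pair for the tail.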
4722
4723SDOperand
4724X86TargetLowering::LowerREADCYCLECOUNTER(SDOperand Op, SelectionDAG &DAG) {
4725  std::vector<MVT::ValueType> Tys;
4726  Tys.push_back(MVT::Other);
4727  Tys.push_back(MVT::Flag);
4728  std::vector<SDOperand> Ops;
4729  Ops.push_back(Op.getOperand(0));
4730  SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &Ops[0], Ops.size());
4731  Ops.clear();
4732  Ops.push_back(DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)));
4733  Ops.push_back(DAG.getCopyFromReg(Ops[0].getValue(1), X86::EDX,
4734                                   MVT::i32, Ops[0].getValue(2)));
4735  Ops.push_back(Ops[1].getValue(1));
4736  Tys[0] = Tys[1] = MVT::i32;
4737  Tys.push_back(MVT::Other);
4738  return DAG.getNode(ISD::MERGE_VALUES, Tys, &Ops[0], Ops.size());
4739}
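// Illustrative only: the intrinsic expands to
//
//     rdtsc                       ; EDX:EAX <- processor time-stamp counter
//
// and the two CopyFromRegs of EAX/EDX plus the chain are glued back
// together with ISD::MERGE_VALUES above.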
4740
4741SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) {
4742  SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
4743
4744  if (!Subtarget->is64Bit()) {
4745    // vastart just stores the address of the VarArgsFrameIndex slot into the
4746    // memory location argument.
4747    SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
4748    return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV->getValue(),
4749                        SV->getOffset());
4750  }
4751
4752  // __va_list_tag:
4753  //   gp_offset         (0 .. 6 * 8; bumped as GP registers are consumed)
4754  //   fp_offset         (48 .. 48 + 8 * 16; bumped as XMM registers are used)
4755  //   overflow_arg_area (points to the parameters passed in memory).
4756  //   reg_save_area
4757  std::vector<SDOperand> MemOps;
4758  SDOperand FIN = Op.getOperand(1);
4759  // Store gp_offset
4760  SDOperand Store = DAG.getStore(Op.getOperand(0),
4761                                 DAG.getConstant(VarArgsGPOffset, MVT::i32),
4762                                 FIN, SV->getValue(), SV->getOffset());
4763  MemOps.push_back(Store);
4764
4765  // Store fp_offset
4766  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
4767                    DAG.getConstant(4, getPointerTy()));
4768  Store = DAG.getStore(Op.getOperand(0),
4769                       DAG.getConstant(VarArgsFPOffset, MVT::i32),
4770                       FIN, SV->getValue(), SV->getOffset());
4771  MemOps.push_back(Store);
4772
4773  // Store ptr to overflow_arg_area
4774  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
4775                    DAG.getConstant(4, getPointerTy()));
4776  SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
4777  Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV->getValue(),
4778                       SV->getOffset());
4779  MemOps.push_back(Store);
4780
4781  // Store ptr to reg_save_area.
4782  FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
4783                    DAG.getConstant(8, getPointerTy()));
4784  SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
4785  Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV->getValue(),
4786                       SV->getOffset());
4787  MemOps.push_back(Store);
4788  return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size());
4789}
4790
4791SDOperand
4792X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
4793  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue();
4794  switch (IntNo) {
4795  default: return SDOperand();    // Don't custom lower most intrinsics.
4796    // Comparison intrinsics.
4797  case Intrinsic::x86_sse_comieq_ss:
4798  case Intrinsic::x86_sse_comilt_ss:
4799  case Intrinsic::x86_sse_comile_ss:
4800  case Intrinsic::x86_sse_comigt_ss:
4801  case Intrinsic::x86_sse_comige_ss:
4802  case Intrinsic::x86_sse_comineq_ss:
4803  case Intrinsic::x86_sse_ucomieq_ss:
4804  case Intrinsic::x86_sse_ucomilt_ss:
4805  case Intrinsic::x86_sse_ucomile_ss:
4806  case Intrinsic::x86_sse_ucomigt_ss:
4807  case Intrinsic::x86_sse_ucomige_ss:
4808  case Intrinsic::x86_sse_ucomineq_ss:
4809  case Intrinsic::x86_sse2_comieq_sd:
4810  case Intrinsic::x86_sse2_comilt_sd:
4811  case Intrinsic::x86_sse2_comile_sd:
4812  case Intrinsic::x86_sse2_comigt_sd:
4813  case Intrinsic::x86_sse2_comige_sd:
4814  case Intrinsic::x86_sse2_comineq_sd:
4815  case Intrinsic::x86_sse2_ucomieq_sd:
4816  case Intrinsic::x86_sse2_ucomilt_sd:
4817  case Intrinsic::x86_sse2_ucomile_sd:
4818  case Intrinsic::x86_sse2_ucomigt_sd:
4819  case Intrinsic::x86_sse2_ucomige_sd:
4820  case Intrinsic::x86_sse2_ucomineq_sd: {
4821    unsigned Opc = 0;
4822    ISD::CondCode CC = ISD::SETCC_INVALID;
4823    switch (IntNo) {
4824    default: break;
4825    case Intrinsic::x86_sse_comieq_ss:
4826    case Intrinsic::x86_sse2_comieq_sd:
4827      Opc = X86ISD::COMI;
4828      CC = ISD::SETEQ;
4829      break;
4830    case Intrinsic::x86_sse_comilt_ss:
4831    case Intrinsic::x86_sse2_comilt_sd:
4832      Opc = X86ISD::COMI;
4833      CC = ISD::SETLT;
4834      break;
4835    case Intrinsic::x86_sse_comile_ss:
4836    case Intrinsic::x86_sse2_comile_sd:
4837      Opc = X86ISD::COMI;
4838      CC = ISD::SETLE;
4839      break;
4840    case Intrinsic::x86_sse_comigt_ss:
4841    case Intrinsic::x86_sse2_comigt_sd:
4842      Opc = X86ISD::COMI;
4843      CC = ISD::SETGT;
4844      break;
4845    case Intrinsic::x86_sse_comige_ss:
4846    case Intrinsic::x86_sse2_comige_sd:
4847      Opc = X86ISD::COMI;
4848      CC = ISD::SETGE;
4849      break;
4850    case Intrinsic::x86_sse_comineq_ss:
4851    case Intrinsic::x86_sse2_comineq_sd:
4852      Opc = X86ISD::COMI;
4853      CC = ISD::SETNE;
4854      break;
4855    case Intrinsic::x86_sse_ucomieq_ss:
4856    case Intrinsic::x86_sse2_ucomieq_sd:
4857      Opc = X86ISD::UCOMI;
4858      CC = ISD::SETEQ;
4859      break;
4860    case Intrinsic::x86_sse_ucomilt_ss:
4861    case Intrinsic::x86_sse2_ucomilt_sd:
4862      Opc = X86ISD::UCOMI;
4863      CC = ISD::SETLT;
4864      break;
4865    case Intrinsic::x86_sse_ucomile_ss:
4866    case Intrinsic::x86_sse2_ucomile_sd:
4867      Opc = X86ISD::UCOMI;
4868      CC = ISD::SETLE;
4869      break;
4870    case Intrinsic::x86_sse_ucomigt_ss:
4871    case Intrinsic::x86_sse2_ucomigt_sd:
4872      Opc = X86ISD::UCOMI;
4873      CC = ISD::SETGT;
4874      break;
4875    case Intrinsic::x86_sse_ucomige_ss:
4876    case Intrinsic::x86_sse2_ucomige_sd:
4877      Opc = X86ISD::UCOMI;
4878      CC = ISD::SETGE;
4879      break;
4880    case Intrinsic::x86_sse_ucomineq_ss:
4881    case Intrinsic::x86_sse2_ucomineq_sd:
4882      Opc = X86ISD::UCOMI;
4883      CC = ISD::SETNE;
4884      break;
4885    }
4886
4887    unsigned X86CC;
4888    SDOperand LHS = Op.getOperand(1);
4889    SDOperand RHS = Op.getOperand(2);
4890    translateX86CC(CC, true, X86CC, LHS, RHS, DAG);
4891
4892    const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
4893    SDOperand Ops1[] = { DAG.getEntryNode(), LHS, RHS };
4894    SDOperand Cond = DAG.getNode(Opc, VTs, 2, Ops1, 3);
4895    VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
4896    SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
4897    SDOperand SetCC = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2);
4898    return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
4899  }
4900  }
4901}
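// Illustrative only: __builtin_ia32_comieq_ss(a, b) lowers to roughly
//
//     comiss  %xmm1, %xmm0
//     sete    %al             ; X86ISD::SETCC with the translated cond code
//
// with the i8 result ANY_EXTENDed to the i32 the intrinsic returns.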
4902
4903/// LowerOperation - Provide custom lowering hooks for some operations.
4904///
4905SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
4906  switch (Op.getOpcode()) {
4907  default: assert(0 && "Should not custom lower this!");
4908  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
4909  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
4910  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
4911  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
4912  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
4913  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
4914  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
4915  case ISD::ExternalSymbol:     return LowerExternalSymbol(Op, DAG);
4916  case ISD::SHL_PARTS:
4917  case ISD::SRA_PARTS:
4918  case ISD::SRL_PARTS:          return LowerShift(Op, DAG);
4919  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
4920  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG);
4921  case ISD::FABS:               return LowerFABS(Op, DAG);
4922  case ISD::FNEG:               return LowerFNEG(Op, DAG);
4923  case ISD::SETCC:              return LowerSETCC(Op, DAG, DAG.getEntryNode());
4924  case ISD::SELECT:             return LowerSELECT(Op, DAG);
4925  case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
4926  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
4927  case ISD::CALL:               return LowerCALL(Op, DAG);
4928  case ISD::RET:                return LowerRET(Op, DAG);
4929  case ISD::FORMAL_ARGUMENTS:   return LowerFORMAL_ARGUMENTS(Op, DAG);
4930  case ISD::MEMSET:             return LowerMEMSET(Op, DAG);
4931  case ISD::MEMCPY:             return LowerMEMCPY(Op, DAG);
4932  case ISD::READCYCLECOUNTER:   return LowerREADCYCLECOUNTER(Op, DAG);
4933  case ISD::VASTART:            return LowerVASTART(Op, DAG);
4934  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
4935  }
4936}
4937
4938const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
4939  switch (Opcode) {
4940  default: return NULL;
4941  case X86ISD::SHLD:               return "X86ISD::SHLD";
4942  case X86ISD::SHRD:               return "X86ISD::SHRD";
4943  case X86ISD::FAND:               return "X86ISD::FAND";
4944  case X86ISD::FXOR:               return "X86ISD::FXOR";
4945  case X86ISD::FILD:               return "X86ISD::FILD";
4946  case X86ISD::FILD_FLAG:          return "X86ISD::FILD_FLAG";
4947  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
4948  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
4949  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
4950  case X86ISD::FLD:                return "X86ISD::FLD";
4951  case X86ISD::FST:                return "X86ISD::FST";
4952  case X86ISD::FP_GET_RESULT:      return "X86ISD::FP_GET_RESULT";
4953  case X86ISD::FP_SET_RESULT:      return "X86ISD::FP_SET_RESULT";
4954  case X86ISD::CALL:               return "X86ISD::CALL";
4955  case X86ISD::TAILCALL:           return "X86ISD::TAILCALL";
4956  case X86ISD::RDTSC_DAG:          return "X86ISD::RDTSC_DAG";
4957  case X86ISD::CMP:                return "X86ISD::CMP";
4958  case X86ISD::COMI:               return "X86ISD::COMI";
4959  case X86ISD::UCOMI:              return "X86ISD::UCOMI";
4960  case X86ISD::SETCC:              return "X86ISD::SETCC";
4961  case X86ISD::CMOV:               return "X86ISD::CMOV";
4962  case X86ISD::BRCOND:             return "X86ISD::BRCOND";
4963  case X86ISD::RET_FLAG:           return "X86ISD::RET_FLAG";
4964  case X86ISD::REP_STOS:           return "X86ISD::REP_STOS";
4965  case X86ISD::REP_MOVS:           return "X86ISD::REP_MOVS";
4966  case X86ISD::LOAD_PACK:          return "X86ISD::LOAD_PACK";
4967  case X86ISD::LOAD_UA:            return "X86ISD::LOAD_UA";
4968  case X86ISD::GlobalBaseReg:      return "X86ISD::GlobalBaseReg";
4969  case X86ISD::Wrapper:            return "X86ISD::Wrapper";
4970  case X86ISD::S2VEC:              return "X86ISD::S2VEC";
4971  case X86ISD::PEXTRW:             return "X86ISD::PEXTRW";
4972  case X86ISD::PINSRW:             return "X86ISD::PINSRW";
4973  }
4974}
4975
4976/// isLegalAddressImmediate - Return true if the integer value or
4977/// GlobalValue can be used as the offset of the target addressing mode.
4978bool X86TargetLowering::isLegalAddressImmediate(int64_t V) const {
4979  // X86 allows a sign-extended 32-bit immediate field.
4980  return V >= -(1LL << 31) && V <= (1LL << 31) - 1;
4981}
4982
4983bool X86TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
4984  // GV is 64-bit but displacement field is 32-bit unless we are in small code
4985  // model. Mac OS X happens to support only small PIC code model.
4986  // FIXME: better support for other OS's.
4987  if (Subtarget->is64Bit() && !Subtarget->isTargetDarwin())
4988    return false;
4989  if (Subtarget->isTargetDarwin()) {
4990    Reloc::Model RModel = getTargetMachine().getRelocationModel();
4991    if (RModel == Reloc::Static)
4992      return true;
4993    else if (RModel == Reloc::DynamicNoPIC)
4994      return !DarwinGVRequiresExtraLoad(GV);
4995    else
4996      return false;
4997  } else
4998    return true;
4999}
5000
5001/// isShuffleMaskLegal - Targets can use this to indicate that they only
5002/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
5003/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
5004/// are assumed to be legal.
5005bool
5006X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
5007  // Only do shuffles on 128-bit vector types for now.
5008  if (MVT::getSizeInBits(VT) == 64) return false;
5009  return (Mask.Val->getNumOperands() <= 4 ||
5010          isSplatMask(Mask.Val)  ||
5011          isPSHUFHW_PSHUFLWMask(Mask.Val) ||
5012          X86::isUNPCKLMask(Mask.Val) ||
5013          X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
5014          X86::isUNPCKHMask(Mask.Val));
5015}
5016
5017bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
5018                                               MVT::ValueType EVT,
5019                                               SelectionDAG &DAG) const {
5020  unsigned NumElts = BVOps.size();
5021  // Only do shuffles on 128-bit vector types for now.
5022  if (MVT::getSizeInBits(EVT) * NumElts == 64) return false;
5023  if (NumElts == 2) return true;
5024  if (NumElts == 4) {
5025    return (isMOVLMask(BVOps)  || isCommutedMOVL(BVOps, true) ||
5026            isSHUFPMask(BVOps) || isCommutedSHUFP(BVOps));
5027  }
5028  return false;
5029}
5030
5031//===----------------------------------------------------------------------===//
5032//                           X86 Scheduler Hooks
5033//===----------------------------------------------------------------------===//
5034
5035MachineBasicBlock *
5036X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
5037                                           MachineBasicBlock *BB) {
5038  switch (MI->getOpcode()) {
5039  default: assert(false && "Unexpected instr type to insert");
5040  case X86::CMOV_FR32:
5041  case X86::CMOV_FR64:
5042  case X86::CMOV_V4F32:
5043  case X86::CMOV_V2F64:
5044  case X86::CMOV_V2I64: {
5045    // To "insert" a SELECT_CC instruction, we actually have to insert the
5046    // diamond control-flow pattern.  The incoming instruction knows the
5047    // destination vreg to set, the condition code register to branch on, the
5048    // true/false values to select between, and a branch opcode to use.
5049    const BasicBlock *LLVM_BB = BB->getBasicBlock();
5050    ilist<MachineBasicBlock>::iterator It = BB;
5051    ++It;
5052
5053    //  thisMBB:
5054    //  ...
5055    //   TrueVal = ...
5056    //   cmpTY ccX, r1, r2
5057    //   bCC copy1MBB
5058    //   fallthrough --> copy0MBB
5059    MachineBasicBlock *thisMBB = BB;
5060    MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
5061    MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
5062    unsigned Opc =
5063      X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
5064    BuildMI(BB, Opc, 1).addMBB(sinkMBB);
5065    MachineFunction *F = BB->getParent();
5066    F->getBasicBlockList().insert(It, copy0MBB);
5067    F->getBasicBlockList().insert(It, sinkMBB);
5068    // Update machine-CFG edges by first adding all successors of the current
5069    // block to the new block which will contain the Phi node for the select.
5070    for (MachineBasicBlock::succ_iterator i = BB->succ_begin(),
5071        e = BB->succ_end(); i != e; ++i)
5072      sinkMBB->addSuccessor(*i);
5073    // Next, remove all successors of the current block, and add the true
5074    // and fallthrough blocks as its successors.
5075    while (!BB->succ_empty())
5076      BB->removeSuccessor(BB->succ_begin());
5077    BB->addSuccessor(copy0MBB);
5078    BB->addSuccessor(sinkMBB);
5079
5080    //  copy0MBB:
5081    //   %FalseValue = ...
5082    //   # fallthrough to sinkMBB
5083    BB = copy0MBB;
5084
5085    // Update machine-CFG edges
5086    BB->addSuccessor(sinkMBB);
5087
5088    //  sinkMBB:
5089    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
5090    //  ...
5091    BB = sinkMBB;
5092    BuildMI(BB, X86::PHI, 4, MI->getOperand(0).getReg())
5093      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
5094      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
5095
5096    delete MI;   // The pseudo instruction is gone now.
5097    return BB;
5098  }
5099
5100  case X86::FP_TO_INT16_IN_MEM:
5101  case X86::FP_TO_INT32_IN_MEM:
5102  case X86::FP_TO_INT64_IN_MEM: {
5103    // Change the floating point control register to use "round towards zero"
5104    // mode when truncating to an integer value.
5105    MachineFunction *F = BB->getParent();
5106    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
5107    addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
5108
5109    // Load the old value of the control word...
5110    unsigned OldCW =
5111      F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass);
5112    addFrameReference(BuildMI(BB, X86::MOV16rm, 4, OldCW), CWFrameIdx);
5113
5114    // Store a control word that sets the rounding mode to round-towards-zero...
5115    addFrameReference(BuildMI(BB, X86::MOV16mi, 5), CWFrameIdx).addImm(0xC7F);
5116
5117    // Reload the modified control word now...
5118    addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
5119
5120    // Restore the memory image of control word to original value
5121    addFrameReference(BuildMI(BB, X86::MOV16mr, 5), CWFrameIdx).addReg(OldCW);
5122
5123    // Get the X86 opcode to use.
5124    unsigned Opc;
5125    switch (MI->getOpcode()) {
5126    default: assert(0 && "illegal opcode!");
5127    case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break;
5128    case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break;
5129    case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break;
5130    }
5131
5132    X86AddressMode AM;
5133    MachineOperand &Op = MI->getOperand(0);
5134    if (Op.isRegister()) {
5135      AM.BaseType = X86AddressMode::RegBase;
5136      AM.Base.Reg = Op.getReg();
5137    } else {
5138      AM.BaseType = X86AddressMode::FrameIndexBase;
5139      AM.Base.FrameIndex = Op.getFrameIndex();
5140    }
5141    Op = MI->getOperand(1);
5142    if (Op.isImmediate())
5143      AM.Scale = Op.getImm();
5144    Op = MI->getOperand(2);
5145    if (Op.isImmediate())
5146      AM.IndexReg = Op.getImm();
5147    Op = MI->getOperand(3);
5148    if (Op.isGlobalAddress()) {
5149      AM.GV = Op.getGlobal();
5150    } else {
5151      AM.Disp = Op.getImm();
5152    }
5153    addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(MI->getOperand(4).getReg());
5154
5155    // Reload the original control word now.
5156    addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
5157
5158    delete MI;   // The pseudo instruction is gone now.
5159    return BB;
5160  }
5161  }
5162}
5163
5164//===----------------------------------------------------------------------===//
5165//                           X86 Optimization Hooks
5166//===----------------------------------------------------------------------===//
5167
5168void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
5169                                                       uint64_t Mask,
5170                                                       uint64_t &KnownZero,
5171                                                       uint64_t &KnownOne,
5172                                                       unsigned Depth) const {
5173  unsigned Opc = Op.getOpcode();
5174  assert((Opc >= ISD::BUILTIN_OP_END ||
5175          Opc == ISD::INTRINSIC_WO_CHAIN ||
5176          Opc == ISD::INTRINSIC_W_CHAIN ||
5177          Opc == ISD::INTRINSIC_VOID) &&
5178         "Should use MaskedValueIsZero if you don't know whether Op"
5179         " is a target node!");
5180
5181  KnownZero = KnownOne = 0;   // Don't know anything.
5182  switch (Opc) {
5183  default: break;
5184  case X86ISD::SETCC:
5185    KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
5186    break;
5187  }
5188}
5189
5190/// getShuffleScalarElt - Returns the scalar element that will make up the ith
5191/// element of the result of the vector shuffle.
5192static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) {
5193  MVT::ValueType VT = N->getValueType(0);
5194  SDOperand PermMask = N->getOperand(2);
5195  unsigned NumElems = PermMask.getNumOperands();
5196  SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1);
5197  i %= NumElems;
5198  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
5199    return (i == 0)
5200      ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
5201  } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
5202    SDOperand Idx = PermMask.getOperand(i);
5203    if (Idx.getOpcode() == ISD::UNDEF)
5204      return DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
5205    return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG);
5206  }
5207  return SDOperand();
5208}
5209
5210/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
5211/// node is a GlobalAddress + an offset.
5212static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
5213  if (N->getOpcode() == X86ISD::Wrapper) {
5214    if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) {
5215      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
5216      return true;
5217    }
5218  } else if (N->getOpcode() == ISD::ADD) {
5219    SDOperand N1 = N->getOperand(0);
5220    SDOperand N2 = N->getOperand(1);
5221    if (isGAPlusOffset(N1.Val, GA, Offset)) {
5222      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
5223      if (V) {
5224        Offset += V->getSignExtended();
5225        return true;
5226      }
5227    } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
5228      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
5229      if (V) {
5230        Offset += V->getSignExtended();
5231        return true;
5232      }
5233    }
5234  }
5235  return false;
5236}
5237
5238/// isConsecutiveLoad - Returns true if N is loading from an address of Base
5239/// + Dist * Size.
5240static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
5241                              MachineFrameInfo *MFI) {
5242  if (N->getOperand(0).Val != Base->getOperand(0).Val)
5243    return false;
5244
5245  SDOperand Loc = N->getOperand(1);
5246  SDOperand BaseLoc = Base->getOperand(1);
5247  if (Loc.getOpcode() == ISD::FrameIndex) {
5248    if (BaseLoc.getOpcode() != ISD::FrameIndex)
5249      return false;
5250    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
5251    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
5252    int FS  = MFI->getObjectSize(FI);
5253    int BFS = MFI->getObjectSize(BFI);
5254    if (FS != BFS || FS != Size) return false;
5255    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
5256  } else {
5257    GlobalValue *GV1 = NULL;
5258    GlobalValue *GV2 = NULL;
5259    int64_t Offset1 = 0;
5260    int64_t Offset2 = 0;
5261    bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
5262    bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
5263    if (isGA1 && isGA2 && GV1 == GV2)
5264      return Offset1 == (Offset2 + Dist*Size);
5265  }
5266
5267  return false;
5268}
5269
5270static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
5271                              const X86Subtarget *Subtarget) {
5272  GlobalValue *GV;
5273  int64_t Offset;
5274  if (isGAPlusOffset(Base, GV, Offset))
5275    return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
5276  else {
5277    assert(Base->getOpcode() == ISD::FrameIndex && "Unexpected base node!");
5278    int BFI = cast<FrameIndexSDNode>(Base)->getIndex();
5279    if (BFI < 0)
5280      // Fixed objects do not specify alignment; their offsets, however, are known.
5281      return ((Subtarget->getStackAlignment() % 16) == 0 &&
5282              (MFI->getObjectOffset(BFI) % 16) == 0);
5283    else
5284      return MFI->getObjectAlignment(BFI) >= 16;
5285  }
5286  return false;
5287}
5288
5289
5290/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
5291/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
5292/// if the load addresses are consecutive, non-overlapping, and in the right
5293/// order.
5294static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
5295                                       const X86Subtarget *Subtarget) {
5296  MachineFunction &MF = DAG.getMachineFunction();
5297  MachineFrameInfo *MFI = MF.getFrameInfo();
5298  MVT::ValueType VT = N->getValueType(0);
5299  MVT::ValueType EVT = MVT::getVectorBaseType(VT);
5300  SDOperand PermMask = N->getOperand(2);
5301  int NumElems = (int)PermMask.getNumOperands();
5302  SDNode *Base = NULL;
5303  for (int i = 0; i < NumElems; ++i) {
5304    SDOperand Idx = PermMask.getOperand(i);
5305    if (Idx.getOpcode() == ISD::UNDEF) {
5306      if (!Base) return SDOperand();
5307    } else {
5308      SDOperand Arg =
5309        getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG);
5310      if (!Arg.Val || !ISD::isNON_EXTLoad(Arg.Val))
5311        return SDOperand();
5312      if (!Base)
5313        Base = Arg.Val;
5314      else if (!isConsecutiveLoad(Arg.Val, Base,
5315                                  i, MVT::getSizeInBits(EVT)/8,MFI))
5316        return SDOperand();
5317    }
5318  }
5319
5320  bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget);
5321  if (isAlign16) {
5322    LoadSDNode *LD = cast<LoadSDNode>(Base);
5323    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
5324                       LD->getSrcValueOffset());
5325  } else {
5326    // Just use movups; it's shorter.
5327    std::vector<MVT::ValueType> Tys;
5328    Tys.push_back(MVT::v4f32);
5329    Tys.push_back(MVT::Other);
5330    SmallVector<SDOperand, 3> Ops;
5331    Ops.push_back(Base->getOperand(0));
5332    Ops.push_back(Base->getOperand(1));
5333    Ops.push_back(Base->getOperand(2));
5334    return DAG.getNode(ISD::BIT_CONVERT, VT,
5335                       DAG.getNode(X86ISD::LOAD_UA, Tys, &Ops[0], Ops.size()));
5336  }
5337}
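// Illustrative only: this combine rewrites a shuffle whose elements are
// four consecutive scalar loads, e.g.
//
//   (v4f32 shuffle of (load p), (load p+4), (load p+8), (load p+12),
//    mask <0,1,2,3>)
//
// into one 16-byte load: a plain (movaps) load when p is known 16-byte
// aligned, otherwise the unaligned X86ISD::LOAD_UA form (movups).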
5338
5339/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
5340static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
5341                                      const X86Subtarget *Subtarget) {
5342  SDOperand Cond = N->getOperand(0);
5343
5344  // If we have SSE2 support, try to form min/max nodes.
5345  if (Subtarget->hasSSE2() &&
5346      (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
5347    if (Cond.getOpcode() == ISD::SETCC) {
5348      // Get the LHS/RHS of the select.
5349      SDOperand LHS = N->getOperand(1);
5350      SDOperand RHS = N->getOperand(2);
5351      ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
5352
5353      unsigned IntNo = 0;
5354      if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
5355        switch (CC) {
5356        default: break;
5357        case ISD::SETOLE: // (X <= Y) ? X : Y -> min
5358        case ISD::SETULE:
5359        case ISD::SETLE:
5360          if (!UnsafeFPMath) break;
5361          // FALL THROUGH.
5362        case ISD::SETOLT:  // (X olt/lt Y) ? X : Y -> min
5363        case ISD::SETLT:
5364          IntNo = LHS.getValueType() == MVT::f32 ? Intrinsic::x86_sse_min_ss :
5365                                                   Intrinsic::x86_sse2_min_sd;
5366          break;
5367
5368        case ISD::SETOGT: // (X > Y) ? X : Y -> max
5369        case ISD::SETUGT:
5370        case ISD::SETGT:
5371          if (!UnsafeFPMath) break;
5372          // FALL THROUGH.
5373        case ISD::SETUGE:  // (X uge/ge Y) ? X : Y -> max
5374        case ISD::SETGE:
5375          IntNo = LHS.getValueType() == MVT::f32 ? Intrinsic::x86_sse_max_ss :
5376                                                   Intrinsic::x86_sse2_max_sd;
5377          break;
5378        }
5379      } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
5380        switch (CC) {
5381        default: break;
5382        case ISD::SETOGT: // (X > Y) ? Y : X -> min
5383        case ISD::SETUGT:
5384        case ISD::SETGT:
5385          if (!UnsafeFPMath) break;
5386          // FALL THROUGH.
5387        case ISD::SETUGE:  // (X uge/ge Y) ? Y : X -> min
5388        case ISD::SETGE:
5389          IntNo = LHS.getValueType() == MVT::f32 ? Intrinsic::x86_sse_min_ss :
5390                                                   Intrinsic::x86_sse2_min_sd;
5391          break;
5392
5393        case ISD::SETOLE:   // (X <= Y) ? Y : X -> max
5394        case ISD::SETULE:
5395        case ISD::SETLE:
5396          if (!UnsafeFPMath) break;
5397          // FALL THROUGH.
5398        case ISD::SETOLT:   // (X olt/lt Y) ? Y : X -> max
5399        case ISD::SETLT:
5400          IntNo = LHS.getValueType() == MVT::f32 ? Intrinsic::x86_sse_max_ss :
5401                                                   Intrinsic::x86_sse2_max_sd;
5402          break;
5403        }
5404      }
5405
5406      // minss/maxss take v4f32 operands; minsd/maxsd take v2f64 operands.
5407      if (IntNo) {
5408        if (LHS.getValueType() == MVT::f32) {
5409          LHS = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4f32, LHS);
5410          RHS = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4f32, RHS);
5411        } else {
5412          LHS = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, LHS);
5413          RHS = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, RHS);
5414        }
5415
5416        MVT::ValueType PtrTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
5417        SDOperand IntNoN = DAG.getConstant(IntNo, PtrTy);
5418
5419        SDOperand Val = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, LHS.getValueType(),
5420                                    IntNoN, LHS, RHS);
5421        return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, N->getValueType(0), Val,
5422                           DAG.getConstant(0, PtrTy));
5423      }
5424    }
5425
5426  }
5427
5428  return SDOperand();
5429}
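// Illustrative only: with SSE2, "x < y ? x : y" on doubles becomes
//
//     minsd   %xmm1, %xmm0
//
// via Intrinsic::x86_sse2_min_sd; the scalars are wrapped with
// SCALAR_TO_VECTOR because minsd formally operates on v2f64, and element
// 0 of the result is extracted back out afterwards.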
5430
5431
5432SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
5433                                               DAGCombinerInfo &DCI) const {
5434  TargetMachine &TM = getTargetMachine();
5435  SelectionDAG &DAG = DCI.DAG;
5436  switch (N->getOpcode()) {
5437  default: break;
5438  case ISD::VECTOR_SHUFFLE:
5439    return PerformShuffleCombine(N, DAG, Subtarget);
5440  case ISD::SELECT:
5441    return PerformSELECTCombine(N, DAG, Subtarget);
5442  }
5443
5444  return SDOperand();
5445}
5446
5447//===----------------------------------------------------------------------===//
5448//                           X86 Inline Assembly Support
5449//===----------------------------------------------------------------------===//
5450
5451/// getConstraintType - Given a constraint letter, return the type of
5452/// constraint it is for this target.
5453X86TargetLowering::ConstraintType
5454X86TargetLowering::getConstraintType(char ConstraintLetter) const {
5455  switch (ConstraintLetter) {
5456  case 'A':
5457  case 'r':
5458  case 'R':
5459  case 'l':
5460  case 'q':
5461  case 'Q':
5462  case 'x':
5463  case 'Y':
5464    return C_RegisterClass;
5465  default: return TargetLowering::getConstraintType(ConstraintLetter);
5466  }
5467}
5468
5469/// isOperandValidForConstraint - Return the specified operand (possibly
5470/// modified) if the specified SDOperand is valid for the specified target
5471/// constraint letter, otherwise return null.
5472SDOperand X86TargetLowering::
5473isOperandValidForConstraint(SDOperand Op, char Constraint, SelectionDAG &DAG) {
5474  switch (Constraint) {
5475  default: break;
5476  case 'i':
5477    // Literal immediates are always ok.
5478    if (isa<ConstantSDNode>(Op)) return Op;
5479
5480    // If we are in non-pic codegen mode, we allow the address of a global to
5481    // be used with 'i'.
5482    if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
5483      if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
5484        return SDOperand(0, 0);
5485
5486      if (GA->getOpcode() != ISD::TargetGlobalAddress)
5487        Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
5488                                        GA->getOffset());
5489      return Op;
5490    }
5491
5492    // Otherwise, not valid for this mode.
5493    return SDOperand(0, 0);
5494  }
5495  return TargetLowering::isOperandValidForConstraint(Op, Constraint, DAG);
5496}
5497
5498
std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    // FIXME: not handling MMX registers yet ('y' constraint).
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'A':   // EAX/EDX
      if (VT == MVT::i32 || VT == MVT::i64)
        return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
      break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
                                     X86::ESI, X86::EDI, X86::EBP, X86::ESP, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
                                     X86::SI, X86::DI, X86::BP, X86::SP, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    case 'l':   // INDEX_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
                                     X86::ESI, X86::EDI, X86::EBP, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
                                     X86::SI, X86::DI, X86::BP, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    case 'x':   // SSE_REGS if SSE1 allowed
      if (Subtarget->hasSSE1())
        return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
                                     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
                                     0);
      return std::vector<unsigned>();
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (Subtarget->hasSSE2())
        return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
                                     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
                                     0);
      return std::vector<unsigned>();
    }
  }

  return std::vector<unsigned>();
}
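/// getRegForInlineAsmConstraint - Given a physical register constraint string
/// (e.g. {eax}), map it to the corresponding register/register-class pair,
/// fixing up GCC's 16-bit aliases (e.g. "{ax}" with an i32 operand -> EAX).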
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RSTRegisterClass;
    }

    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second != X86::GR16RegisterClass)
    return Res;

  if (VT == MVT::i8) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::AL; break;
    case X86::DX: DestReg = X86::DL; break;
    case X86::CX: DestReg = X86::CL; break;
    case X86::BX: DestReg = X86::BL; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR8RegisterClass;
    }
  } else if (VT == MVT::i32) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::EAX; break;
    case X86::DX: DestReg = X86::EDX; break;
    case X86::CX: DestReg = X86::ECX; break;
    case X86::BX: DestReg = X86::EBX; break;
    case X86::SI: DestReg = X86::ESI; break;
    case X86::DI: DestReg = X86::EDI; break;
    case X86::BP: DestReg = X86::EBP; break;
    case X86::SP: DestReg = X86::ESP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR32RegisterClass;
    }
  } else if (VT == MVT::i64) {
    unsigned DestReg = 0;
    switch (Res.first) {
    default: break;
    case X86::AX: DestReg = X86::RAX; break;
    case X86::DX: DestReg = X86::RDX; break;
    case X86::CX: DestReg = X86::RCX; break;
    case X86::BX: DestReg = X86::RBX; break;
    case X86::SI: DestReg = X86::RSI; break;
    case X86::DI: DestReg = X86::RDI; break;
    case X86::BP: DestReg = X86::RBP; break;
    case X86::SP: DestReg = X86::RSP; break;
    }
    if (DestReg) {
      Res.first = DestReg;
      Res.second = X86::GR64RegisterClass;
    }
  }

  return Res;
}
