PPCISelLowering.cpp revision 8cc3474f72388836fa4ca7d3622289fb9ee08b41
//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                     CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags,
                                     CCState &State);
static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
                                            MVT &LocVT,
                                            CCValAssign::LocInfo &LocInfo,
                                            ISD::ArgFlagsTy &ArgFlags,
                                            CCState &State);
static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                              MVT &LocVT,
                                              CCValAssign::LocInfo &LocInfo,
                                              ISD::ArgFlagsTy &ArgFlags,
                                              CCState &State);

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);

static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) {
  if (TM.getSubtargetImpl()->isDarwin())
    return new TargetLoweringObjectFileMachO();

  return new TargetLoweringObjectFileELF();
}

PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  : TargetLowering(TM, CreateTLOF(TM)), PPCSubTarget(*TM.getSubtargetImpl()) {
  const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>();

  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget->isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8:4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
  addRegisterClass(MVT::f64, &PPC::F8RCRegClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);

  // This is used in the ppcf128->int sequence.  Note it has different semantics
  // from FP_ROUND:  that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FMA  , MVT::f64, Legal);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  setOperationAction(ISD::FMA  , MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Use the hardware square-root instruction when available; otherwise
  // FSQRT must be expanded.
  if (!Subtarget->hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
  setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
  setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);

  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32   , Expand);
  setOperationAction(ISD::ROTR, MVT::i64   , Expand);

  // PowerPC does not have Select
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::i64, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND which requires SetCC
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT,  MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::f64, Expand);

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);


  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);

  if (Subtarget->isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget->has64BitSupport()) {
    // Subtargets with 64-bit support also have instructions for converting
    // between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    // FIXME: disable this lowered code.  This generates 64-bit register values,
    // and we don't model the fact that the top part is clobbered by calls.  We
    // need to flag these together so that the value isn't live across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  if (Subtarget->use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget->hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT::SimpleValueType VT = (MVT::SimpleValueType)i;

      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , VT, Legal);
      setOperationAction(ISD::SUB , VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTLZ, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }

  if (Subtarget->has64BitSupport()) {
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  }

  setOperationAction(ISD::ATOMIC_LOAD,  MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  if (isPPC64) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  setTargetDAGCombine(ISD::BSWAP);

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget->isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  setMinFunctionAlignment(2);
  if (PPCSubTarget.isDarwin())
    setPrefFunctionAlignment(4);

  if (isPPC64 && Subtarget->isJITCodeModel())
    // Temporary workaround for the inability of PPC64 JIT to handle jump
    // tables.
    setSupportJumpTables(false);

  setInsertFencesForAtomic(true);

  setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties();
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const {
  const TargetMachine &TM = getTargetMachine();
  // Darwin passes everything on a 4-byte boundary.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin())
    return 4;

  // Vectors that are 16 bytes or wider are passed on a 16-byte boundary.
  if (VectorType *VTy = dyn_cast<VectorType>(Ty))
    if (VTy->getBitWidth() >= 128)
      return 16;

  // Everything else is passed on an 8-byte boundary on PPC64 and a 4-byte
  // boundary on PPC32.
  if (PPCSubTarget.isPPC64())
    return 8;

  return 4;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:            return "PPCISD::FSEL";
  case PPCISD::FCFID:           return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:          return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:           return "PPCISD::VPERM";
  case PPCISD::Hi:              return "PPCISD::Hi";
  case PPCISD::Lo:              return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
  case PPCISD::TOC_RESTORE:     return "PPCISD::TOC_RESTORE";
  case PPCISD::LOAD:            return "PPCISD::LOAD";
  case PPCISD::LOAD_TOC:        return "PPCISD::LOAD_TOC";
  case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:             return "PPCISD::SRL";
  case PPCISD::SRA:             return "PPCISD::SRA";
  case PPCISD::SHL:             return "PPCISD::SHL";
  case PPCISD::EXTSW_32:        return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:          return "PPCISD::STD_32";
  case PPCISD::CALL_SVR4:       return "PPCISD::CALL_SVR4";
  case PPCISD::CALL_NOP_SVR4:   return "PPCISD::CALL_NOP_SVR4";
  case PPCISD::CALL_Darwin:     return "PPCISD::CALL_Darwin";
  case PPCISD::NOP:             return "PPCISD::NOP";
  case PPCISD::MTCTR:           return "PPCISD::MTCTR";
  case PPCISD::BCTRL_Darwin:    return "PPCISD::BCTRL_Darwin";
  case PPCISD::BCTRL_SVR4:      return "PPCISD::BCTRL_SVR4";
  case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:            return "PPCISD::MFCR";
  case PPCISD::VCMP:            return "PPCISD::VCMP";
  case PPCISD::VCMPo:           return "PPCISD::VCMPo";
  case PPCISD::LBRX:            return "PPCISD::LBRX";
  case PPCISD::STBRX:           return "PPCISD::STBRX";
  case PPCISD::LARX:            return "PPCISD::LARX";
  case PPCISD::STCX:            return "PPCISD::STCX";
  case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
  case PPCISD::MFFS:            return "PPCISD::MFFS";
  case PPCISD::MTFSB0:          return "PPCISD::MTFSB0";
  case PPCISD::MTFSB1:          return "PPCISD::MTFSB1";
  case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
  case PPCISD::MTFSF:           return "PPCISD::MTFSF";
  case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
  }
}

EVT PPCTargetLowering::getSetCCResultType(EVT VT) const {
  return MVT::i32;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
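/// For example, the (non-unary) vpkuhum mask keeps the odd-numbered byte of
/// each halfword from both inputs, i.e. the v16i8 mask
/// {1,3,5,7,9,11,13,15, 17,19,21,23,25,27,29,31}.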
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),  i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),    i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+1))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
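/// For example, with UnitSize == 1 the (non-unary) vmrghb pattern
/// interleaves the first eight bytes of each operand, giving the v16i8 mask
/// {0,16, 1,17, 2,18, ..., 7,23}, while vmrglb interleaves the last eight,
/// {8,24, 9,25, ..., 15,31}.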
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}


/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
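/// For example, the mask {3,4,5,...,18} selects 16 consecutive bytes
/// starting at byte 3 of the concatenated inputs, so it is a vsldoi with a
/// shift amount of 3; in the unary case the indices wrap modulo 16, so
/// {3,4,...,15,0,1,2} also yields 3.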
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         "PPC only supports shuffles by bytes!");

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  }
  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte element
  // splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool PPC::isAllNegativeZeroVector(SDNode *N) {
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);

  APInt APVal, APUndef;
  unsigned BitSize;
  bool HasAnyUndefs;

  if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true))
    if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
      return CFP->getValueAPF().isNegZero();

  return false;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
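/// For example, a v4i32 splat of element 2 appears here as the v16i8 mask
/// {8,9,10,11, 8,9,10,11, 8,9,10,11, 8,9,10,11}; with EltSize == 4 this
/// returns 8/4 == 2, the word index expected by vspltw.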
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across each chunk.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (UniquedVals[i&(Multiple-1)].getNode() == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].getNode() == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.getNode() == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (OpVal.getNode() == 0) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
         (Value                        & ((1 << (8*ValSizeInBytes))-1)))
      return SDValue();
  }

  // Properly sign extend the value.
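  // Shifting left then arithmetic-shifting right by (4-ByteSize)*8 bits
  // replicates the element's sign bit through the upper bits of the i32.
  // For example, ByteSize == 1 and Value == 0xF0 yields MaskVal == -16.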
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
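/// For example, the i32 constant 0xFFFF8000 is the sign extension of the
/// 16-bit value -32768 and is accepted, while 0x00008000 (32768) is not.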
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (N->getOpcode() != ISD::Constant)
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // fold to r+i if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are provably
    // disjoint.
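    // For example, (or (shl X, 16), Y) where Y is known to fit in 16 bits
    // behaves exactly like (add (shl X, 16), Y): no bit position is set in
    // both operands, so the "or" can never produce a carry.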
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.ComputeMaskedBits(N.getOperand(0),
                          LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.ComputeMaskedBits(N.getOperand(1),
                            RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG) const {
  // FIXME dl should come from parent load or store, not from address
  DebugLoc dl = N.getDebugLoc();
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);

      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0"
    short Imm;
    if (isIntS16Immediate(CN, Imm)) {
      Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
      Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::X0 : PPC::R0,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if (CN->getValueType(0) == MVT::i32 ||
        (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
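      // The displacement is the sign-extended low 16 bits; subtracting it
      // back out leaves the carry-adjusted high half for the LIS.  For
      // example, Addr == 0x12348000 gives Disp == -32768 and LIS 0x1235,
      // since 0x12350000 - 0x8000 == 0x12348000.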
      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;      // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::X0 : PPC::R0,
                         N.getValueType());
  Index = N;
  return true;
}

/// SelectAddressRegImmShift - Returns true if the address N can be
/// represented by a base register plus a signed 14-bit displacement
/// [r+imm*4].  Suitable for use by STD and friends.
bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp,
                                                 SDValue &Base,
                                                 SelectionDAG &DAG) const {
  // FIXME dl should come from the parent load or store, not the address
  DebugLoc dl = N.getDebugLoc();
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.  Verify low two bits are clear.
    if ((CN->getZExtValue() & 3) == 0) {
      // If this address fits entirely in a 14-bit sext immediate field, codegen
      // this as "d, 0"
      short Imm;
      if (isIntS16Immediate(CN, Imm)) {
        Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy());
        Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::X0 : PPC::R0,
                               CN->getValueType(0));
        return true;
      }

      // Fold the low-part of 32-bit absolute addresses into addr mode.
      if (CN->getValueType(0) == MVT::i32 ||
          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
        int Addr = (int)CN->getZExtValue();

        // Otherwise, break this down into an LIS + disp.
        Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);
        Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32);
        unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
        Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base),0);
        return true;
      }
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;      // [r+0]
}


/// getPreIndexedAddressParts - Returns true if the node's address can be
/// legally represented as a pre-indexed load/store address, and sets the
/// base pointer, offset pointer, and addressing mode by reference.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  SDValue Ptr;
  EVT VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();

  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT  = ST->getMemoryVT();
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (VT.isVector())
    return false;

  if (SelectAddressRegReg(Ptr, Offset, Base, DAG)) {
    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU use reg+imm*4, others use reg+imm.
  if (VT != MVT::i64) {
    // reg + imm
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
      return false;
  } else {
    // reg + imm * 4.
    if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// GetLabelAccessInfo - Return true if we should reference labels using a
/// PICBase, and set HiOpFlags and LoOpFlags to the target MO flags.
static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
                               unsigned &LoOpFlags, const GlobalValue *GV = 0) {
  HiOpFlags = PPCII::MO_HA16;
  LoOpFlags = PPCII::MO_LO16;

  // Don't use the PIC base if we are not in the PIC relocation model, or if
  // we are on a non-Darwin platform.  We don't support PIC on other platforms
  // yet.
  bool isPIC = TM.getRelocationModel() == Reloc::PIC_ &&
               TM.getSubtarget<PPCSubtarget>().isDarwin();
  if (isPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }

  // If this is a reference to a global value that requires a non-lazy-ptr,
  // make sure that instruction lowering adds it.
  if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) {
    HiOpFlags |= PPCII::MO_NLP_FLAG;
    LoOpFlags |= PPCII::MO_NLP_FLAG;

    if (GV->hasHiddenVisibility()) {
      HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
      LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
    }
  }

  return isPIC;
}

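/// LowerLabelRef - Combine the Hi and Lo parts of a symbol reference into
/// the complete address.  On PowerPC a 32-bit address is materialized with
/// a two-instruction sequence, roughly:
///   lis  rD, ha16(sym)      ; load the high-adjusted upper half
///   addi rD, rD, lo16(sym)  ; add the sign-extended lower half
/// where ha16 is pre-adjusted to compensate for lo16 being sign extended.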
static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, PtrVT);
  DebugLoc DL = HiPart.getDebugLoc();

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

  // Generate non-pic code that has direct accesses to the constant pool.
  // The address of the global is just (hi(&g)+lo(&g)).
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
  SDValue CPIHi =
    DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
  SDValue CPILo =
    DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
  SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
  SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
  return LowerLabelRef(JTIHi, JTILo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
  SDValue TgtBAHi = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true, MOHiFlag);
  SDValue TgtBALo = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true, MOLoFlag);
  return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  DebugLoc dl = GA->getDebugLoc();
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy();
  bool is64bit = PPCSubTarget.isPPC64();

  TLSModel::Model model = getTargetMachine().getTLSModel(GV);

  SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                             PPCII::MO_TPREL16_HA);
  SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                             PPCII::MO_TPREL16_LO);

  if (model != TLSModel::LocalExec)
    llvm_unreachable("only local-exec TLS mode supported");
  SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
                                   is64bit ? MVT::i64 : MVT::i32);
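  // Local-exec TLS variables live at small offsets from the thread pointer
  // (X13 on 64-bit, R2 on 32-bit SVR4); the address is formed roughly as:
  //   addis rD, rTP, sym@tprel@ha
  //   addi  rD, rD,  sym@tprel@l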
  SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
  return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
}

SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  DebugLoc DL = GSDN->getDebugLoc();
  const GlobalValue *GV = GSDN->getGlobal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
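  // A single TOC-relative load, roughly "ld rD, sym@toc(r2)", produces the
  // address; X2 holds the TOC base pointer.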
1273  if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
1274    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
1275    return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i64, GA,
1276                       DAG.getRegister(PPC::X2, MVT::i64));
1277  }
1278
1279  unsigned MOHiFlag, MOLoFlag;
1280  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag, GV);
1281
1282  SDValue GAHi =
1283    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
1284  SDValue GALo =
1285    DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
1286
1287  SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);
1288
1289  // If the global reference is actually to a non-lazy-pointer, we have to do an
1290  // extra load to get the address of the global.
1291  if (MOHiFlag & PPCII::MO_NLP_FLAG)
1292    Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
1293                      false, false, false, 0);
1294  return Ptr;
1295}
1296
1297SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
1298  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
1299  DebugLoc dl = Op.getDebugLoc();
1300
1301  // If we're comparing for equality to zero, expose the fact that this is
1302  // implemented as a ctlz/srl pair on PPC, so that the DAG combiner can
1303  // fold the new nodes.
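  // For example, for i32 (a sketch): seteq X, 0 becomes srl (ctlz X), 5,
  // i.e. cntlzw/srwi, since ctlz(X) == 32 exactly when X == 0.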
1304  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1305    if (C->isNullValue() && CC == ISD::SETEQ) {
1306      EVT VT = Op.getOperand(0).getValueType();
1307      SDValue Zext = Op.getOperand(0);
1308      if (VT.bitsLT(MVT::i32)) {
1309        VT = MVT::i32;
1310        Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
1311      }
1312      unsigned Log2b = Log2_32(VT.getSizeInBits());
1313      SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
1314      SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
1315                                DAG.getConstant(Log2b, MVT::i32));
1316      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
1317    }
1318    // Leave comparisons against 0 and -1 alone for now, since they're usually
1319    // optimized.  FIXME: revisit this when we can custom lower all setcc
1320    // optimizations.
1321    if (C->isAllOnesValue() || C->isNullValue())
1322      return SDValue();
1323  }
1324
1325  // If we have an integer seteq/setne, turn it into a compare against zero
1326  // by xor'ing the rhs with the lhs, which is faster than setting a
1327  // condition register, reading it back out, and masking the correct bit.  The
1328  // normal approach here uses sub to do this instead of xor.  Using xor exposes
1329  // the result to other bit-twiddling opportunities.
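  // e.g. (a sketch): seteq a, b becomes seteq (xor a, b), 0.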
1330  EVT LHSVT = Op.getOperand(0).getValueType();
1331  if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
1332    EVT VT = Op.getValueType();
1333    SDValue Xor = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
1334                              Op.getOperand(1));
1335    return DAG.getSetCC(dl, VT, Xor, DAG.getConstant(0, LHSVT), CC);
1336  }
1337  return SDValue();
1338}
1339
1340SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
1341                                      const PPCSubtarget &Subtarget) const {
1342  SDNode *Node = Op.getNode();
1343  EVT VT = Node->getValueType(0);
1344  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1345  SDValue InChain = Node->getOperand(0);
1346  SDValue VAListPtr = Node->getOperand(1);
1347  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
1348  DebugLoc dl = Node->getDebugLoc();
1349
1350  assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
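  // In outline, the nodes built below compute (a sketch; index is gpr for
  // integer types and fpr for floating-point types):
  //   addr = index < 8 ? reg_save_area + index*(int ? 4 : 8) + (fp ? 32 : 0)
  //                    : overflow_area;
  // then bump the index (and overflow_area) and load the result from addr.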
1351
1352  // gpr_index
1353  SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
1354                                    VAListPtr, MachinePointerInfo(SV), MVT::i8,
1355                                    false, false, 0);
1356  InChain = GprIndex.getValue(1);
1357
1358  if (VT == MVT::i64) {
1359    // Check if GprIndex is even
1360    SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
1361                                 DAG.getConstant(1, MVT::i32));
1362    SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
1363                                DAG.getConstant(0, MVT::i32), ISD::SETNE);
1364    SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
1365                                          DAG.getConstant(1, MVT::i32));
1366    // Align GprIndex to be even if it isn't
1367    GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
1368                           GprIndex);
1369  }
1370
1371  // fpr index is 1 byte after gpr
1372  SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
1373                               DAG.getConstant(1, MVT::i32));
1374
1375  // fpr
1376  SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
1377                                    FprPtr, MachinePointerInfo(SV), MVT::i8,
1378                                    false, false, 0);
1379  InChain = FprIndex.getValue(1);
1380
1381  SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
1382                                       DAG.getConstant(8, MVT::i32));
1383
1384  SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
1385                                        DAG.getConstant(4, MVT::i32));
1386
1387  // areas
1388  SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
1389                                     MachinePointerInfo(), false, false,
1390                                     false, 0);
1391  InChain = OverflowArea.getValue(1);
1392
1393  SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr,
1394                                    MachinePointerInfo(), false, false,
1395                                    false, 0);
1396  InChain = RegSaveArea.getValue(1);
1397
1398  // select overflow_area if index >= 8
1399  SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
1400                            DAG.getConstant(8, MVT::i32), ISD::SETLT);
1401
1402  // adjustment constant gpr_index * 4/8
1403  SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
1404                                    VT.isInteger() ? GprIndex : FprIndex,
1405                                    DAG.getConstant(VT.isInteger() ? 4 : 8,
1406                                                    MVT::i32));
1407
1408  // OurReg = RegSaveArea + RegConstant
1409  SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
1410                               RegConstant);
1411
1412  // Floating types are 32 bytes into RegSaveArea
1413  if (VT.isFloatingPoint())
1414    OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
1415                         DAG.getConstant(32, MVT::i32));
1416
1417  // increase {f,g}pr_index by 1 (or 2 if VT is i64)
1418  SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
1419                                   VT.isInteger() ? GprIndex : FprIndex,
1420                                   DAG.getConstant(VT == MVT::i64 ? 2 : 1,
1421                                                   MVT::i32));
1422
1423  InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
1424                              VT.isInteger() ? VAListPtr : FprPtr,
1425                              MachinePointerInfo(SV),
1426                              MVT::i8, false, false, 0);
1427
1428  // determine if we should load from reg_save_area or overflow_area
1429  SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
1430
1431  // increase overflow_area by 4/8 if gpr/fpr >= 8
1432  SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
1433                                          DAG.getConstant(VT.isInteger() ? 4 : 8,
1434                                          MVT::i32));
1435
1436  OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
1437                             OverflowAreaPlusN);
1438
1439  InChain = DAG.getTruncStore(InChain, dl, OverflowArea,
1440                              OverflowAreaPtr,
1441                              MachinePointerInfo(),
1442                              MVT::i32, false, false, 0);
1443
1444  return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(),
1445                     false, false, false, 0);
1446}
1447
1448SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
1449                                                  SelectionDAG &DAG) const {
1450  return Op.getOperand(0);
1451}
1452
1453SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
1454                                                SelectionDAG &DAG) const {
1455  SDValue Chain = Op.getOperand(0);
1456  SDValue Trmp = Op.getOperand(1); // trampoline
1457  SDValue FPtr = Op.getOperand(2); // nested function
1458  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
1459  DebugLoc dl = Op.getDebugLoc();
1460
1461  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1462  bool isPPC64 = (PtrVT == MVT::i64);
1463  Type *IntPtrTy =
1464    DAG.getTargetLoweringInfo().getTargetData()->getIntPtrType(
1465                                                             *DAG.getContext());
1466
1467  TargetLowering::ArgListTy Args;
1468  TargetLowering::ArgListEntry Entry;
1469
1470  Entry.Ty = IntPtrTy;
1471  Entry.Node = Trmp; Args.push_back(Entry);
1472
1473  // TrampSize == (isPPC64 ? 48 : 40);
1474  Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40,
1475                               isPPC64 ? MVT::i64 : MVT::i32);
1476  Args.push_back(Entry);
1477
1478  Entry.Node = FPtr; Args.push_back(Entry);
1479  Entry.Node = Nest; Args.push_back(Entry);
1480
1481  // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
1482  TargetLowering::CallLoweringInfo CLI(Chain,
1483                                       Type::getVoidTy(*DAG.getContext()),
1484                                       false, false, false, false, 0,
1485                                       CallingConv::C,
1486                /*isTailCall=*/false,
1487                                       /*doesNotRet=*/false,
1488                                       /*isReturnValueUsed=*/true,
1489                DAG.getExternalSymbol("__trampoline_setup", PtrVT),
1490                Args, DAG, dl);
1491  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
1492
1493  return CallResult.second;
1494}
1495
1496SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
1497                                        const PPCSubtarget &Subtarget) const {
1498  MachineFunction &MF = DAG.getMachineFunction();
1499  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
1500
1501  DebugLoc dl = Op.getDebugLoc();
1502
1503  if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
1504    // vastart just stores the address of the VarArgsFrameIndex slot into the
1505    // memory location argument.
1506    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1507    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
1508    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1509    return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
1510                        MachinePointerInfo(SV),
1511                        false, false, 0);
1512  }
1513
1514  // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
1515  // We suppose the given va_list is already allocated.
1516  //
1517  // typedef struct {
1518  //  char gpr;     /* index into the array of 8 GPRs
1519  //                 * stored in the register save area
1520  //                 * gpr=0 corresponds to r3,
1521  //                 * gpr=1 to r4, etc.
1522  //                 */
1523  //  char fpr;     /* index into the array of 8 FPRs
1524  //                 * stored in the register save area
1525  //                 * fpr=0 corresponds to f1,
1526  //                 * fpr=1 to f2, etc.
1527  //                 */
1528  //  char *overflow_arg_area;
1529  //                /* location on stack that holds
1530  //                 * the next overflow argument
1531  //                 */
1532  //  char *reg_save_area;
1533  //               /* where r3:r10 and f1:f8 (if saved)
1534  //                * are stored
1535  //                */
1536  // } va_list[1];
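  // Hence the byte offsets used below: gpr at 0, fpr at 1, overflow_arg_area
  // at 4 and reg_save_area at 8 (assuming 4-byte pointers on PPC32).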
1537
1538
1539  SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), MVT::i32);
1540  SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), MVT::i32);
1541
1542
1543  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1544
1545  SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
1546                                            PtrVT);
1547  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
1548                                 PtrVT);
1549
1550  uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
1551  SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT);
1552
1553  uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
1554  SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT);
1555
1556  uint64_t FPROffset = 1;
1557  SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT);
1558
1559  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1560
1561  // Store first byte : number of int regs
1562  SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR,
1563                                         Op.getOperand(1),
1564                                         MachinePointerInfo(SV),
1565                                         MVT::i8, false, false, 0);
1566  uint64_t nextOffset = FPROffset;
1567  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
1568                                  ConstFPROffset);
1569
1570  // Store second byte : number of float regs
1571  SDValue secondStore =
1572    DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
1573                      MachinePointerInfo(SV, nextOffset), MVT::i8,
1574                      false, false, 0);
1575  nextOffset += StackOffset;
1576  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
1577
1578  // Store second word : arguments given on stack
1579  SDValue thirdStore =
1580    DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
1581                 MachinePointerInfo(SV, nextOffset),
1582                 false, false, 0);
1583  nextOffset += FrameOffset;
1584  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
1585
1586  // Store third word : arguments given in registers
1587  return DAG.getStore(thirdStore, dl, FR, nextPtr,
1588                      MachinePointerInfo(SV, nextOffset),
1589                      false, false, 0);
1590
1591}
1592
1593#include "PPCGenCallingConv.inc"
1594
1595static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
1596                                     CCValAssign::LocInfo &LocInfo,
1597                                     ISD::ArgFlagsTy &ArgFlags,
1598                                     CCState &State) {
1599  return true;
1600}
1601
1602static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
1603                                            MVT &LocVT,
1604                                            CCValAssign::LocInfo &LocInfo,
1605                                            ISD::ArgFlagsTy &ArgFlags,
1606                                            CCState &State) {
1607  static const uint16_t ArgRegs[] = {
1608    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
1609    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
1610  };
1611  const unsigned NumArgRegs = array_lengthof(ArgRegs);
1612
1613  unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
1614
1615  // Skip one register if the first unallocated register has an even register
1616  // number and there are still argument registers available which have not been
1617  // allocated yet. RegNum is actually an index into ArgRegs, which means we
1618  // need to skip a register if RegNum is odd.
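  // For example, if R3 already holds an i32 argument, an i64 argument skips
  // R4 and is passed in the R5:R6 pair (illustrative).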
1619  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
1620    State.AllocateReg(ArgRegs[RegNum]);
1621  }
1622
1623  // Always return false here, as this function only makes sure that the first
1624  // unallocated register has an odd register number and does not actually
1625  // allocate a register for the current argument.
1626  return false;
1627}
1628
1629static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
1630                                              MVT &LocVT,
1631                                              CCValAssign::LocInfo &LocInfo,
1632                                              ISD::ArgFlagsTy &ArgFlags,
1633                                              CCState &State) {
1634  static const uint16_t ArgRegs[] = {
1635    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
1636    PPC::F8
1637  };
1638
1639  const unsigned NumArgRegs = array_lengthof(ArgRegs);
1640
1641  unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
1642
1643  // If there is only one Floating-point register left we need to put both f64
1644  // values of a split ppc_fp128 value on the stack.
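  // e.g. with F1-F7 taken, allocating F8 would split a ppc_fp128 between F8
  // and the stack; skipping F8 sends both halves to the stack (illustrative).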
1645  if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
1646    State.AllocateReg(ArgRegs[RegNum]);
1647  }
1648
1649  // Always return false here, as this function only makes sure that the two f64
1650  // values a ppc_fp128 value is split into are both passed in registers or both
1651  // passed on the stack and does not actually allocate a register for the
1652  // current argument.
1653  return false;
1654}
1655
1656/// GetFPR - Get the set of FP registers that should be allocated for
1657/// arguments on Darwin.
1658static const uint16_t *GetFPR() {
1659  static const uint16_t FPR[] = {
1660    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
1661    PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
1662  };
1663
1664  return FPR;
1665}
1666
1667/// CalculateStackSlotSize - Calculates the size reserved for this argument on
1668/// the stack.
1669static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
1670                                       unsigned PtrByteSize) {
1671  unsigned ArgSize = ArgVT.getSizeInBits()/8;
1672  if (Flags.isByVal())
1673    ArgSize = Flags.getByValSize();
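  // Round up to a multiple of the pointer size; e.g. a 6-byte byval with
  // 4-byte pointers reserves 8 bytes (illustrative).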
1674  ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
1675
1676  return ArgSize;
1677}
1678
1679SDValue
1680PPCTargetLowering::LowerFormalArguments(SDValue Chain,
1681                                        CallingConv::ID CallConv, bool isVarArg,
1682                                        const SmallVectorImpl<ISD::InputArg>
1683                                          &Ins,
1684                                        DebugLoc dl, SelectionDAG &DAG,
1685                                        SmallVectorImpl<SDValue> &InVals)
1686                                          const {
1687  if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) {
1688    return LowerFormalArguments_SVR4(Chain, CallConv, isVarArg, Ins,
1689                                     dl, DAG, InVals);
1690  } else {
1691    return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
1692                                       dl, DAG, InVals);
1693  }
1694}
1695
1696SDValue
1697PPCTargetLowering::LowerFormalArguments_SVR4(
1698                                      SDValue Chain,
1699                                      CallingConv::ID CallConv, bool isVarArg,
1700                                      const SmallVectorImpl<ISD::InputArg>
1701                                        &Ins,
1702                                      DebugLoc dl, SelectionDAG &DAG,
1703                                      SmallVectorImpl<SDValue> &InVals) const {
1704
1705  // 32-bit SVR4 ABI Stack Frame Layout:
1706  //              +-----------------------------------+
1707  //        +-->  |            Back chain             |
1708  //        |     +-----------------------------------+
1709  //        |     | Floating-point register save area |
1710  //        |     +-----------------------------------+
1711  //        |     |    General register save area     |
1712  //        |     +-----------------------------------+
1713  //        |     |          CR save word             |
1714  //        |     +-----------------------------------+
1715  //        |     |         VRSAVE save word          |
1716  //        |     +-----------------------------------+
1717  //        |     |         Alignment padding         |
1718  //        |     +-----------------------------------+
1719  //        |     |     Vector register save area     |
1720  //        |     +-----------------------------------+
1721  //        |     |       Local variable space        |
1722  //        |     +-----------------------------------+
1723  //        |     |        Parameter list area        |
1724  //        |     +-----------------------------------+
1725  //        |     |           LR save word            |
1726  //        |     +-----------------------------------+
1727  // SP-->  +---  |            Back chain             |
1728  //              +-----------------------------------+
1729  //
1730  // Specifications:
1731  //   System V Application Binary Interface PowerPC Processor Supplement
1732  //   AltiVec Technology Programming Interface Manual
1733
1734  MachineFunction &MF = DAG.getMachineFunction();
1735  MachineFrameInfo *MFI = MF.getFrameInfo();
1736  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
1737
1738  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1739  // Potential tail calls could cause overwriting of argument stack slots.
1740  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
1741                       (CallConv == CallingConv::Fast));
1742  unsigned PtrByteSize = 4;
1743
1744  // Assign locations to all of the incoming arguments.
1745  SmallVector<CCValAssign, 16> ArgLocs;
1746  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1747                 getTargetMachine(), ArgLocs, *DAG.getContext());
1748
1749  // Reserve space for the linkage area on the stack.
1750  CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
1751
1752  CCInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4);
1753
1754  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1755    CCValAssign &VA = ArgLocs[i];
1756
1757    // Arguments stored in registers.
1758    if (VA.isRegLoc()) {
1759      const TargetRegisterClass *RC;
1760      EVT ValVT = VA.getValVT();
1761
1762      switch (ValVT.getSimpleVT().SimpleTy) {
1763        default:
1764          llvm_unreachable("ValVT not supported by formal arguments Lowering");
1765        case MVT::i32:
1766          RC = &PPC::GPRCRegClass;
1767          break;
1768        case MVT::f32:
1769          RC = &PPC::F4RCRegClass;
1770          break;
1771        case MVT::f64:
1772          RC = &PPC::F8RCRegClass;
1773          break;
1774        case MVT::v16i8:
1775        case MVT::v8i16:
1776        case MVT::v4i32:
1777        case MVT::v4f32:
1778          RC = &PPC::VRRCRegClass;
1779          break;
1780      }
1781
1782      // Transform the arguments stored in physical registers into virtual ones.
1783      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
1784      SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT);
1785
1786      InVals.push_back(ArgValue);
1787    } else {
1788      // Argument stored in memory.
1789      assert(VA.isMemLoc());
1790
1791      unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8;
1792      int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
1793                                      isImmutable);
1794
1795      // Create load nodes to retrieve arguments from the stack.
1796      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1797      InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
1798                                   MachinePointerInfo(),
1799                                   false, false, false, 0));
1800    }
1801  }
1802
1803  // Assign locations to all of the incoming aggregate by value arguments.
1804  // Aggregates passed by value are stored in the local variable space of the
1805  // caller's stack frame, right above the parameter list area.
1806  SmallVector<CCValAssign, 16> ByValArgLocs;
1807  CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1808                      getTargetMachine(), ByValArgLocs, *DAG.getContext());
1809
1810  // Reserve stack space for the allocations in CCInfo.
1811  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
1812
1813  CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4_ByVal);
1814
1815  // Area that is at least reserved in the caller of this function.
1816  unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
1817
1818  // Set the size that is at least reserved in caller of this function.  Tail
1819  // call optimized function's reserved stack space needs to be aligned so that
1820  // taking the difference between two stack areas will result in an aligned
1821  // stack.
1822  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1823
1824  MinReservedArea =
1825    std::max(MinReservedArea,
1826             PPCFrameLowering::getMinCallFrameSize(false, false));
1827
1828  unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()->
1829    getStackAlignment();
1830  unsigned AlignMask = TargetAlign-1;
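  // Round up to the target stack alignment; e.g. 52 bytes become 64 with a
  // 16-byte aligned stack (illustrative).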
1831  MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
1832
1833  FI->setMinReservedArea(MinReservedArea);
1834
1835  SmallVector<SDValue, 8> MemOps;
1836
1837  // If the function takes a variable number of arguments, make a frame index for
1838  // the start of the first vararg value... for expansion of llvm.va_start.
1839  if (isVarArg) {
1840    static const uint16_t GPArgRegs[] = {
1841      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
1842      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
1843    };
1844    const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
1845
1846    static const uint16_t FPArgRegs[] = {
1847      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
1848      PPC::F8
1849    };
1850    const unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
1851
1852    FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs,
1853                                                          NumGPArgRegs));
1854    FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs,
1855                                                          NumFPArgRegs));
1856
1857    // Make room for NumGPArgRegs and NumFPArgRegs.
1858    int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
1859                NumFPArgRegs * EVT(MVT::f64).getSizeInBits()/8;
1860
1861    FuncInfo->setVarArgsStackOffset(
1862      MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
1863                             CCInfo.getNextStackOffset(), true));
1864
1865    FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false));
1866    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
1867
1868    // The fixed integer arguments of a variadic function are stored to the
1869    // VarArgsFrameIndex on the stack so that they may be loaded by dereferencing
1870    // the result of va_next.
1871    for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
1872      // Get an existing live-in vreg, or add a new one.
1873      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
1874      if (!VReg)
1875        VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
1876
1877      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
1878      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
1879                                   MachinePointerInfo(), false, false, 0);
1880      MemOps.push_back(Store);
1881      // Increment the address by four for the next argument to store
1882      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
1883      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
1884    }
1885
1886    // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
1887    // is set.
1888    // The double arguments are stored to the VarArgsFrameIndex
1889    // on the stack.
1890    for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
1891      // Get an existing live-in vreg, or add a new one.
1892      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
1893      if (!VReg)
1894        VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
1895
1896      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
1897      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
1898                                   MachinePointerInfo(), false, false, 0);
1899      MemOps.push_back(Store);
1900      // Increment the address by eight for the next argument to store
1901      SDValue PtrOff = DAG.getConstant(EVT(MVT::f64).getSizeInBits()/8,
1902                                         PtrVT);
1903      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
1904    }
1905  }
1906
1907  if (!MemOps.empty())
1908    Chain = DAG.getNode(ISD::TokenFactor, dl,
1909                        MVT::Other, &MemOps[0], MemOps.size());
1910
1911  return Chain;
1912}
1913
1914SDValue
1915PPCTargetLowering::LowerFormalArguments_Darwin(
1916                                      SDValue Chain,
1917                                      CallingConv::ID CallConv, bool isVarArg,
1918                                      const SmallVectorImpl<ISD::InputArg>
1919                                        &Ins,
1920                                      DebugLoc dl, SelectionDAG &DAG,
1921                                      SmallVectorImpl<SDValue> &InVals) const {
1922  // TODO: add description of PPC stack frame format, or at least some docs.
1923  //
1924  MachineFunction &MF = DAG.getMachineFunction();
1925  MachineFrameInfo *MFI = MF.getFrameInfo();
1926  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
1927
1928  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1929  bool isPPC64 = PtrVT == MVT::i64;
1930  // Potential tail calls could cause overwriting of argument stack slots.
1931  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
1932                       (CallConv == CallingConv::Fast));
1933  unsigned PtrByteSize = isPPC64 ? 8 : 4;
1934
1935  unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true);
1936  // Area that is at least reserved in caller of this function.
1937  unsigned MinReservedArea = ArgOffset;
1938
1939  static const uint16_t GPR_32[] = {           // 32-bit registers.
1940    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
1941    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
1942  };
1943  static const uint16_t GPR_64[] = {           // 64-bit registers.
1944    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
1945    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
1946  };
1947
1948  static const uint16_t *FPR = GetFPR();
1949
1950  static const uint16_t VR[] = {
1951    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
1952    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
1953  };
1954
1955  const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
1956  const unsigned Num_FPR_Regs = 13;
1957  const unsigned Num_VR_Regs  = array_lengthof(VR);
1958
1959  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
1960
1961  const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32;
1962
1963  // In 32-bit non-varargs functions, the stack space for vectors is after the
1964  // stack space for non-vectors.  We do not use this space unless we have
1965  // too many vectors to fit in registers, something that only occurs in
1966  // constructed examples:), but we have to walk the arglist to figure
1967  // that out...for the pathological case, compute VecArgOffset as the
1968  // start of the vector parameter area.  Computing VecArgOffset is the
1969  // entire point of the following loop.
1970  unsigned VecArgOffset = ArgOffset;
1971  if (!isVarArg && !isPPC64) {
1972    for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
1973         ++ArgNo) {
1974      EVT ObjectVT = Ins[ArgNo].VT;
1975      ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
1976
1977      if (Flags.isByVal()) {
1978        // ObjSize is the true size; ArgSize rounds it up to a multiple of regs.
1979        unsigned ObjSize = Flags.getByValSize();
1980        unsigned ArgSize =
1981                ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
1982        VecArgOffset += ArgSize;
1983        continue;
1984      }
1985
1986      switch(ObjectVT.getSimpleVT().SimpleTy) {
1987      default: llvm_unreachable("Unhandled argument type!");
1988      case MVT::i32:
1989      case MVT::f32:
1990        VecArgOffset += isPPC64 ? 8 : 4;
1991        break;
1992      case MVT::i64:  // PPC64
1993      case MVT::f64:
1994        VecArgOffset += 8;
1995        break;
1996      case MVT::v4f32:
1997      case MVT::v4i32:
1998      case MVT::v8i16:
1999      case MVT::v16i8:
2000        // Nothing to do, we're only looking at Nonvector args here.
2001        break;
2002      }
2003    }
2004  }
2005  // We've found where the vector parameter area in memory is.  Skip the
2006  // first 12 vector parameters; they are passed in V2-V13 and don't use it.
2007  VecArgOffset = ((VecArgOffset+15)/16)*16;
2008  VecArgOffset += 12*16;
2009
2010  // Add DAG nodes to load the arguments or copy them out of registers.  On
2011  // entry to a function on PPC, the arguments start after the linkage area,
2012  // although the first ones are often in registers.
2013
2014  SmallVector<SDValue, 8> MemOps;
2015  unsigned nAltivecParamsAtEnd = 0;
2016  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
2017    SDValue ArgVal;
2018    bool needsLoad = false;
2019    EVT ObjectVT = Ins[ArgNo].VT;
2020    unsigned ObjSize = ObjectVT.getSizeInBits()/8;
2021    unsigned ArgSize = ObjSize;
2022    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
2023
2024    unsigned CurArgOffset = ArgOffset;
2025
2026    // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
2027    if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
2028        ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
2029      if (isVarArg || isPPC64) {
2030        MinReservedArea = ((MinReservedArea+15)/16)*16;
2031        MinReservedArea += CalculateStackSlotSize(ObjectVT,
2032                                                  Flags,
2033                                                  PtrByteSize);
2034      } else  nAltivecParamsAtEnd++;
2035    } else
2036      // Calculate min reserved area.
2037      MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
2038                                                Flags,
2039                                                PtrByteSize);
2040
2041    // FIXME the codegen can be much improved in some cases.
2042    // We do not have to keep everything in memory.
2043    if (Flags.isByVal()) {
2044      // ObjSize is the true size; ArgSize rounds it up to a register-size multiple.
2045      ObjSize = Flags.getByValSize();
2046      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
2047      // Objects of size 1 and 2 are right justified, everything else is
2048      // left justified.  This means the memory address is adjusted forwards.
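      // e.g. a 1-byte byval in a 4-byte slot is addressed at slot+3
      // (illustrative).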
2049      if (ObjSize==1 || ObjSize==2) {
2050        CurArgOffset = CurArgOffset + (4 - ObjSize);
2051      }
2052      // The value of the object is its address.
2053      int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true);
2054      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
2055      InVals.push_back(FIN);
2056      if (ObjSize==1 || ObjSize==2) {
2057        if (GPR_idx != Num_GPR_Regs) {
2058          unsigned VReg;
2059          if (isPPC64)
2060            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
2061          else
2062            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
2063          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
2064          SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
2065                                            MachinePointerInfo(),
2066                                            ObjSize==1 ? MVT::i8 : MVT::i16,
2067                                            false, false, 0);
2068          MemOps.push_back(Store);
2069          ++GPR_idx;
2070        }
2071
2072        ArgOffset += PtrByteSize;
2073
2074        continue;
2075      }
2076      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
2077        // Store whatever pieces of the object are in registers
2078        // to memory.  ArgVal will be address of the beginning of
2079        // the object.
2080        if (GPR_idx != Num_GPR_Regs) {
2081          unsigned VReg;
2082          if (isPPC64)
2083            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
2084          else
2085            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
2086          int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
2087          SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
2088          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
2089          SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
2090                                       MachinePointerInfo(),
2091                                       false, false, 0);
2092          MemOps.push_back(Store);
2093          ++GPR_idx;
2094          ArgOffset += PtrByteSize;
2095        } else {
2096          ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
2097          break;
2098        }
2099      }
2100      continue;
2101    }
2102
2103    switch (ObjectVT.getSimpleVT().SimpleTy) {
2104    default: llvm_unreachable("Unhandled argument type!");
2105    case MVT::i32:
2106      if (!isPPC64) {
2107        if (GPR_idx != Num_GPR_Regs) {
2108          unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
2109          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
2110          ++GPR_idx;
2111        } else {
2112          needsLoad = true;
2113          ArgSize = PtrByteSize;
2114        }
2115        // All int arguments reserve stack space in the Darwin ABI.
2116        ArgOffset += PtrByteSize;
2117        break;
2118      }
2119      // FALLTHROUGH
2120    case MVT::i64:  // PPC64
2121      if (GPR_idx != Num_GPR_Regs) {
2122        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
2123        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
2124
2125        if (ObjectVT == MVT::i32) {
2126          // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
2127          // value to MVT::i64 and then truncate to the correct register size.
2128          if (Flags.isSExt())
2129            ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
2130                                 DAG.getValueType(ObjectVT));
2131          else if (Flags.isZExt())
2132            ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
2133                                 DAG.getValueType(ObjectVT));
2134
2135          ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
2136        }
2137
2138        ++GPR_idx;
2139      } else {
2140        needsLoad = true;
2141        ArgSize = PtrByteSize;
2142      }
2143      // All int arguments reserve stack space in the Darwin ABI.
2144      ArgOffset += 8;
2145      break;
2146
2147    case MVT::f32:
2148    case MVT::f64:
2149      // Every 4 bytes of argument space consumes one of the GPRs available for
2150      // argument passing.
2151      if (GPR_idx != Num_GPR_Regs) {
2152        ++GPR_idx;
2153        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
2154          ++GPR_idx;
2155      }
2156      if (FPR_idx != Num_FPR_Regs) {
2157        unsigned VReg;
2158
2159        if (ObjectVT == MVT::f32)
2160          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
2161        else
2162          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
2163
2164        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
2165        ++FPR_idx;
2166      } else {
2167        needsLoad = true;
2168      }
2169
2170      // All FP arguments reserve stack space in the Darwin ABI.
2171      ArgOffset += isPPC64 ? 8 : ObjSize;
2172      break;
2173    case MVT::v4f32:
2174    case MVT::v4i32:
2175    case MVT::v8i16:
2176    case MVT::v16i8:
2177      // Note that vector arguments in registers don't reserve stack space,
2178      // except in varargs functions.
2179      if (VR_idx != Num_VR_Regs) {
2180        unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
2181        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
2182        if (isVarArg) {
2183          while ((ArgOffset % 16) != 0) {
2184            ArgOffset += PtrByteSize;
2185            if (GPR_idx != Num_GPR_Regs)
2186              GPR_idx++;
2187          }
2188          ArgOffset += 16;
2189          GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
2190        }
2191        ++VR_idx;
2192      } else {
2193        if (!isVarArg && !isPPC64) {
2194          // Vectors go after all the nonvectors.
2195          CurArgOffset = VecArgOffset;
2196          VecArgOffset += 16;
2197        } else {
2198          // Vectors are aligned.
2199          ArgOffset = ((ArgOffset+15)/16)*16;
2200          CurArgOffset = ArgOffset;
2201          ArgOffset += 16;
2202        }
2203        needsLoad = true;
2204      }
2205      break;
2206    }
2207
2208    // We need to load the argument to a virtual register if we determined above
2209    // that we ran out of physical registers of the appropriate type.
2210    if (needsLoad) {
2211      int FI = MFI->CreateFixedObject(ObjSize,
2212                                      CurArgOffset + (ArgSize - ObjSize),
2213                                      isImmutable);
2214      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
2215      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
2216                           false, false, false, 0);
2217    }
2218
2219    InVals.push_back(ArgVal);
2220  }
2221
2222  // Set the size that is at least reserved in caller of this function.  Tail
2223  // call optimized function's reserved stack space needs to be aligned so that
2224  // taking the difference between two stack areas will result in an aligned
2225  // stack.
2226  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
2227  // Add the Altivec parameters at the end, if needed.
2228  if (nAltivecParamsAtEnd) {
2229    MinReservedArea = ((MinReservedArea+15)/16)*16;
2230    MinReservedArea += 16*nAltivecParamsAtEnd;
2231  }
2232  MinReservedArea =
2233    std::max(MinReservedArea,
2234             PPCFrameLowering::getMinCallFrameSize(isPPC64, true));
2235  unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()->
2236    getStackAlignment();
2237  unsigned AlignMask = TargetAlign-1;
2238  MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
2239  FI->setMinReservedArea(MinReservedArea);
2240
2241  // If the function takes a variable number of arguments, make a frame index for
2242  // the start of the first vararg value... for expansion of llvm.va_start.
2243  if (isVarArg) {
2244    int Depth = ArgOffset;
2245
2246    FuncInfo->setVarArgsFrameIndex(
2247      MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
2248                             Depth, true));
2249    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2250
2251    // If this function is vararg, store any remaining integer argument regs
2252    // to their spots on the stack so that they may be loaded by dereferencing the
2253    // result of va_next.
2254    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
2255      unsigned VReg;
2256
2257      if (isPPC64)
2258        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
2259      else
2260        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
2261
2262      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
2263      SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
2264                                   MachinePointerInfo(), false, false, 0);
2265      MemOps.push_back(Store);
2266      // Increment the address by four for the next argument to store
2267      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
2268      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
2269    }
2270  }
2271
2272  if (!MemOps.empty())
2273    Chain = DAG.getNode(ISD::TokenFactor, dl,
2274                        MVT::Other, &MemOps[0], MemOps.size());
2275
2276  return Chain;
2277}
2278
2279/// CalculateParameterAndLinkageAreaSize - Get the size of the parameter plus
2280/// linkage area for the Darwin ABI.
2281static unsigned
2282CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
2283                                     bool isPPC64,
2284                                     bool isVarArg,
2285                                     unsigned CC,
2286                                     const SmallVectorImpl<ISD::OutputArg>
2287                                       &Outs,
2288                                     const SmallVectorImpl<SDValue> &OutVals,
2289                                     unsigned &nAltivecParamsAtEnd) {
2290  // Count how many bytes are to be pushed on the stack, including the linkage
2291  // area, and parameter passing area.  We start with 24/48 bytes, which is
2292  // prereserved space for [SP][CR][LR][3 x unused].
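  // (Six slots: SP, CR, LR and three unused; 4-byte slots give 24 bytes and
  // 8-byte slots give 48. Sketch.)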
2293  unsigned NumBytes = PPCFrameLowering::getLinkageSize(isPPC64, true);
2294  unsigned NumOps = Outs.size();
2295  unsigned PtrByteSize = isPPC64 ? 8 : 4;
2296
2297  // Add up all the space actually used.
2298  // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
2299  // they all go in registers, but we must reserve stack space for them for
2300  // possible use by the caller.  In varargs or 64-bit calls, parameters are
2301  // assigned stack space in order, with padding so Altivec parameters are
2302  // 16-byte aligned.
2303  nAltivecParamsAtEnd = 0;
2304  for (unsigned i = 0; i != NumOps; ++i) {
2305    ISD::ArgFlagsTy Flags = Outs[i].Flags;
2306    EVT ArgVT = Outs[i].VT;
2307    // Varargs Altivec parameters are padded to a 16 byte boundary.
2308    if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
2309        ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) {
2310      if (!isVarArg && !isPPC64) {
2311        // Non-varargs Altivec parameters go after all the non-Altivec
2312        // parameters; handle those later so we know how much padding we need.
2313        nAltivecParamsAtEnd++;
2314        continue;
2315      }
2316      // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
2317      NumBytes = ((NumBytes+15)/16)*16;
2318    }
2319    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
2320  }
2321
2322   // Allow for Altivec parameters at the end, if needed.
2323  if (nAltivecParamsAtEnd) {
2324    NumBytes = ((NumBytes+15)/16)*16;
2325    NumBytes += 16*nAltivecParamsAtEnd;
2326  }
2327
2328  // The prolog code of the callee may store up to 8 GPR argument registers to
2329  // the stack, allowing va_start to index over them in memory if it's varargs.
2330  // Because we cannot tell if this is needed on the caller side, we have to
2331  // conservatively assume that it is needed.  As such, make sure we have at
2332  // least enough stack space for the caller to store the 8 GPRs.
2333  NumBytes = std::max(NumBytes,
2334                      PPCFrameLowering::getMinCallFrameSize(isPPC64, true));
2335
2336  // Tail call needs the stack to be aligned.
2337  if (CC == CallingConv::Fast && DAG.getTarget().Options.GuaranteedTailCallOpt){
2338    unsigned TargetAlign = DAG.getMachineFunction().getTarget().
2339      getFrameLowering()->getStackAlignment();
2340    unsigned AlignMask = TargetAlign-1;
2341    NumBytes = (NumBytes + AlignMask) & ~AlignMask;
2342  }
2343
2344  return NumBytes;
2345}
2346
2347/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
2348/// adjusted to accommodate the arguments for the tailcall.
2349static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
2350                                   unsigned ParamSize) {
2351
2352  if (!isTailCall) return 0;
2353
2354  PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
2355  unsigned CallerMinReservedArea = FI->getMinReservedArea();
2356  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
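  // e.g. if the caller reserved 64 bytes but this call needs 80 bytes of
  // parameter space, SPDiff is -16 and the stack must grow (illustrative).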
2357  // Remember only if the new adjustment is bigger.
2358  if (SPDiff < FI->getTailCallSPDelta())
2359    FI->setTailCallSPDelta(SPDiff);
2360
2361  return SPDiff;
2362}
2363
2364/// IsEligibleForTailCallOptimization - Check whether the call is eligible
2365/// for tail call optimization. Targets which want to do tail call
2366/// optimization should implement this function.
2367bool
2368PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
2369                                                     CallingConv::ID CalleeCC,
2370                                                     bool isVarArg,
2371                                      const SmallVectorImpl<ISD::InputArg> &Ins,
2372                                                     SelectionDAG& DAG) const {
2373  if (!getTargetMachine().Options.GuaranteedTailCallOpt)
2374    return false;
2375
2376  // Variable argument functions are not supported.
2377  if (isVarArg)
2378    return false;
2379
2380  MachineFunction &MF = DAG.getMachineFunction();
2381  CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
2382  if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
2383    // Functions containing by val parameters are not supported.
2384    for (unsigned i = 0; i != Ins.size(); i++) {
2385       ISD::ArgFlagsTy Flags = Ins[i].Flags;
2386       if (Flags.isByVal()) return false;
2387    }
2388
2389    // Non-PIC/GOT tail calls are supported.
2390    if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
2391      return true;
2392
2393    // At the moment we can only do local tail calls (in same module, hidden
2394    // or protected) if we are generating PIC.
2395    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
2396      return G->getGlobal()->hasHiddenVisibility()
2397          || G->getGlobal()->hasProtectedVisibility();
2398  }
2399
2400  return false;
2401}
2402
2403/// isBLACompatibleAddress - Return the immediate to use if the specified
2404/// 32-bit value is representable in the immediate field of a BxA instruction.
2405static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
2406  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
2407  if (!C) return 0;
2408
2409  int Addr = C->getZExtValue();
2410  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
2411      (Addr << 6 >> 6) != Addr)
2412    return 0;  // Top 6 bits have to be sext of immediate.
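  // i.e. the byte address must be word-aligned and fit in a sign-extended
  // 26-bit field (BLA's 24-bit LI field shifted left by 2; sketch).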
2413
2414  return DAG.getConstant((int)C->getZExtValue() >> 2,
2415                         DAG.getTargetLoweringInfo().getPointerTy()).getNode();
2416}
2417
2418namespace {
2419
2420struct TailCallArgumentInfo {
2421  SDValue Arg;
2422  SDValue FrameIdxOp;
2423  int       FrameIdx;
2424
2425  TailCallArgumentInfo() : FrameIdx(0) {}
2426};
2427
2428}
2429
2430/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
2431static void
2432StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
2433                                           SDValue Chain,
2434                   const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs,
2435                   SmallVector<SDValue, 8> &MemOpChains,
2436                   DebugLoc dl) {
2437  for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
2438    SDValue Arg = TailCallArgs[i].Arg;
2439    SDValue FIN = TailCallArgs[i].FrameIdxOp;
2440    int FI = TailCallArgs[i].FrameIdx;
2441    // Store relative to framepointer.
2442    MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN,
2443                                       MachinePointerInfo::getFixedStack(FI),
2444                                       false, false, 0));
2445  }
2446}
2447
2448/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
2449/// the appropriate stack slot for the tail call optimized function call.
2450static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
2451                                               MachineFunction &MF,
2452                                               SDValue Chain,
2453                                               SDValue OldRetAddr,
2454                                               SDValue OldFP,
2455                                               int SPDiff,
2456                                               bool isPPC64,
2457                                               bool isDarwinABI,
2458                                               DebugLoc dl) {
2459  if (SPDiff) {
2460    // Calculate the new stack slot for the return address.
2461    int SlotSize = isPPC64 ? 8 : 4;
2462    int NewRetAddrLoc = SPDiff + PPCFrameLowering::getReturnSaveOffset(isPPC64,
2463                                                                   isDarwinABI);
2464    int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
2465                                                          NewRetAddrLoc, true);
2466    EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
2467    SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
2468    Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
2469                         MachinePointerInfo::getFixedStack(NewRetAddr),
2470                         false, false, 0);
2471
2472    // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
2473    // slot as the FP is never overwritten.
2474    if (isDarwinABI) {
2475      int NewFPLoc =
2476        SPDiff + PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI);
2477      int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc,
2478                                                          true);
2479      SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
2480      Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
2481                           MachinePointerInfo::getFixedStack(NewFPIdx),
2482                           false, false, 0);
2483    }
2484  }
2485  return Chain;
2486}
2487
2488/// CalculateTailCallArgDest - Remember the argument for later processing.
2489/// Calculate the position of the argument.
2490static void
2491CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
2492                         SDValue Arg, int SPDiff, unsigned ArgOffset,
2493                      SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
2494  int Offset = ArgOffset + SPDiff;
2495  uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
2496  int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
2497  EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
2498  SDValue FIN = DAG.getFrameIndex(FI, VT);
2499  TailCallArgumentInfo Info;
2500  Info.Arg = Arg;
2501  Info.FrameIdxOp = FIN;
2502  Info.FrameIdx = FI;
2503  TailCallArguments.push_back(Info);
2504}
2505
2506/// EmitTailCallLoadFPAndRetAddr - Emit loads of the return address and frame
2507/// pointer from their stack slots. Returns the chain as result and the loaded
2508/// values in LROpOut/FPOpOut. Used when tail calling.
2509SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
2510                                                        int SPDiff,
2511                                                        SDValue Chain,
2512                                                        SDValue &LROpOut,
2513                                                        SDValue &FPOpOut,
2514                                                        bool isDarwinABI,
2515                                                        DebugLoc dl) const {
2516  if (SPDiff) {
2517    // Load the LR and FP stack slot for later adjusting.
2518    EVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
2519    LROpOut = getReturnAddrFrameIndex(DAG);
2520    LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
2521                          false, false, false, 0);
2522    Chain = SDValue(LROpOut.getNode(), 1);
2523
2524    // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
2525    // slot as the FP is never overwritten.
2526    if (isDarwinABI) {
2527      FPOpOut = getFramePointerFrameIndex(DAG);
2528      FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(),
2529                            false, false, false, 0);
2530      Chain = SDValue(FPOpOut.getNode(), 1);
2531    }
2532  }
2533  return Chain;
2534}
2535
2536/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
2537/// by "Src" to address "Dst" of size "Size".  Alignment information is
2538/// specified by the specific parameter attribute. The copy will be passed as
2539/// a byval function parameter.
2540/// Sometimes what we are copying is the end of a larger object, the part that
2541/// does not fit in registers.
2542static SDValue
2543CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2544                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2545                          DebugLoc dl) {
2546  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2547  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2548                       false, false, MachinePointerInfo(0),
2549                       MachinePointerInfo(0));
2550}
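// A hedged C-level sketch (names here are illustrative, not part of this
// file) of what the memcpy above amounts to: for
//   struct S { char buf[32]; };
//   void callee(struct S s);              // byval aggregate parameter
// the caller effectively lowers callee(s) to
//   memcpy(outgoing_arg_area, &s, sizeof(S));
//   callee(/* callee reads the copy from the argument area */);
// so the callee can modify its parameter without touching the caller's object.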
2551
2552/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
2553/// tail calls.
2554static void
2555LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
2556                 SDValue Arg, SDValue PtrOff, int SPDiff,
2557                 unsigned ArgOffset, bool isPPC64, bool isTailCall,
2558                 bool isVector, SmallVector<SDValue, 8> &MemOpChains,
2559                 SmallVector<TailCallArgumentInfo, 8> &TailCallArguments,
2560                 DebugLoc dl) {
2561  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2562  if (!isTailCall) {
2563    if (isVector) {
2564      SDValue StackPtr;
2565      if (isPPC64)
2566        StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
2567      else
2568        StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
2569      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
2570                           DAG.getConstant(ArgOffset, PtrVT));
2571    }
2572    MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
2573                                       MachinePointerInfo(), false, false, 0));
2574  } else // Calculate and remember argument location.
2575    CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
2576                             TailCallArguments);
2577}
2578
2579static
2580void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
2581                     DebugLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes,
2582                     SDValue LROp, SDValue FPOp, bool isDarwinABI,
2583                     SmallVector<TailCallArgumentInfo, 8> &TailCallArguments) {
2584  MachineFunction &MF = DAG.getMachineFunction();
2585
2586  // Emit a sequence of copyto/copyfrom virtual registers for arguments that
2587  // might overwrite each other in case of tail call optimization.
2588  SmallVector<SDValue, 8> MemOpChains2;
2589  // Do not flag preceding copytoreg stuff together with the following stuff.
2590  InFlag = SDValue();
2591  StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
2592                                    MemOpChains2, dl);
2593  if (!MemOpChains2.empty())
2594    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2595                        &MemOpChains2[0], MemOpChains2.size());
2596
2597  // Store the return address to the appropriate stack slot.
2598  Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff,
2599                                        isPPC64, isDarwinABI, dl);
2600
2601  // Emit callseq_end just before tailcall node.
2602  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
2603                             DAG.getIntPtrConstant(0, true), InFlag);
2604  InFlag = Chain.getValue(1);
2605}
2606
2607static
2608unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
2609                     SDValue &Chain, DebugLoc dl, int SPDiff, bool isTailCall,
2610                     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
2611                     SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys,
2612                     const PPCSubtarget &PPCSubTarget) {
2613
2614  bool isPPC64 = PPCSubTarget.isPPC64();
2615  bool isSVR4ABI = PPCSubTarget.isSVR4ABI();
2616
2617  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2618  NodeTys.push_back(MVT::Other);   // Returns a chain
2619  NodeTys.push_back(MVT::Glue);    // Returns a flag for retval copy to use.
2620
2621  unsigned CallOpc = isSVR4ABI ? PPCISD::CALL_SVR4 : PPCISD::CALL_Darwin;
2622
2623  bool needIndirectCall = true;
2624  if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
2625    // If this is an absolute destination address, use the munged value.
2626    Callee = SDValue(Dest, 0);
2627    needIndirectCall = false;
2628  }
2629
2630  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2631    // XXX Workaround for http://llvm.org/bugs/show_bug.cgi?id=5201
2632    // Use indirect calls for ALL function calls in JIT mode, since the
2633    // far-call stubs may be outside relocation limits for a BL instruction.
2634    if (!DAG.getTarget().getSubtarget<PPCSubtarget>().isJITCodeModel()) {
2635      unsigned OpFlags = 0;
2636      if (DAG.getTarget().getRelocationModel() != Reloc::Static &&
2637          (PPCSubTarget.getTargetTriple().isMacOSX() &&
2638           PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5)) &&
2639          (G->getGlobal()->isDeclaration() ||
2640           G->getGlobal()->isWeakForLinker())) {
2641        // PC-relative references to external symbols should go through $stub,
2642        // unless we're building with the leopard linker or later, which
2643        // automatically synthesizes these stubs.
2644        OpFlags = PPCII::MO_DARWIN_STUB;
2645      }
2646
2647      // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
2648      // every direct call is) turn it into a TargetGlobalAddress /
2649      // TargetExternalSymbol node so that legalize doesn't hack it.
2650      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
2651                                          Callee.getValueType(),
2652                                          0, OpFlags);
2653      needIndirectCall = false;
2654    }
2655  }
2656
2657  if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2658    unsigned char OpFlags = 0;
2659
2660    if (DAG.getTarget().getRelocationModel() != Reloc::Static &&
2661        (PPCSubTarget.getTargetTriple().isMacOSX() &&
2662         PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5))) {
2663      // PC-relative references to external symbols should go through $stub,
2664      // unless we're building with the leopard linker or later, which
2665      // automatically synthesizes these stubs.
2666      OpFlags = PPCII::MO_DARWIN_STUB;
2667    }
2668
2669    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
2670                                         OpFlags);
2671    needIndirectCall = false;
2672  }
2673
2674  if (needIndirectCall) {
2675    // Otherwise, this is an indirect call.  We have to use an MTCTR/BCTRL
2676    // pair to do the call; we can't use PPCISD::CALL.
2677    SDValue MTCTROps[] = {Chain, Callee, InFlag};
2678
2679    if (isSVR4ABI && isPPC64) {
2680      // Function pointers in the 64-bit SVR4 ABI do not point to the function
2681      // entry point, but to the function descriptor (the function entry point
2682      // address is part of the function descriptor though).
2683      // The function descriptor is a three doubleword structure with the
2684      // following fields: function entry point, TOC base address and
2685      // environment pointer.
2686      // Thus for a call through a function pointer, the following actions need
2687      // to be performed:
2688      //   1. Save the TOC of the caller in the TOC save area of its stack
2689      //      frame (this is done in LowerCall_Darwin()).
2690      //   2. Load the address of the function entry point from the function
2691      //      descriptor.
2692      //   3. Load the TOC of the callee from the function descriptor into r2.
2693      //   4. Load the environment pointer from the function descriptor into
2694      //      r11.
2695      //   5. Branch to the function entry point address.
2696      //   6. On return of the callee, the TOC of the caller needs to be
2697      //      restored (this is done in FinishCall()).
2698      //
2699      // All those operations are flagged together to ensure that no other
2700      // operations can be scheduled in between. E.g. without flagging the
2701      // operations together, a TOC access in the caller could be scheduled
2702      // between the load of the callee TOC and the branch to the callee, which
2703      // results in the TOC access going through the TOC of the callee instead
2704      // of going through the TOC of the caller, which leads to incorrect code.
2705
2706      // Load the address of the function entry point from the function
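      // An illustrative sketch of steps 2-6 (assembly shown for exposition
      // only; the real code is produced by instruction selection, and the
      // descriptor base register is arbitrary, r11 here):
      //   ld    r0, 0(r11)    # 2. function entry point
      //   ld    r2, 8(r11)    # 3. TOC of the callee
      //   ld    r11, 16(r11)  # 4. environment pointer
      //   mtctr r0
      //   bctrl               # 5. branch to the entry point
      //   ld    r2, 40(r1)    # 6. reload the caller TOC from its save slot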
2707      // descriptor.
2708      SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Glue);
2709      SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, MTCTROps,
2710                                        InFlag.getNode() ? 3 : 2);
2711      Chain = LoadFuncPtr.getValue(1);
2712      InFlag = LoadFuncPtr.getValue(2);
2713
2714      // Load environment pointer into r11.
2715      // Offset of the environment pointer within the function descriptor.
2716      SDValue PtrOff = DAG.getIntPtrConstant(16);
2717
2718      SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
2719      SDValue LoadEnvPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, Chain, AddPtr,
2720                                       InFlag);
2721      Chain = LoadEnvPtr.getValue(1);
2722      InFlag = LoadEnvPtr.getValue(2);
2723
2724      SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr,
2725                                        InFlag);
2726      Chain = EnvVal.getValue(0);
2727      InFlag = EnvVal.getValue(1);
2728
2729      // Load TOC of the callee into r2. We are using a target-specific load
2730      // with r2 hard coded, because the result of a target-independent load
2731      // would never go directly into r2, since r2 is a reserved register (which
2732      // prevents the register allocator from allocating it), resulting in an
2733      // additional register being allocated and an unnecessary move instruction
2734      // being generated.
2735      VTs = DAG.getVTList(MVT::Other, MVT::Glue);
2736      SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain,
2737                                       Callee, InFlag);
2738      Chain = LoadTOCPtr.getValue(0);
2739      InFlag = LoadTOCPtr.getValue(1);
2740
2741      MTCTROps[0] = Chain;
2742      MTCTROps[1] = LoadFuncPtr;
2743      MTCTROps[2] = InFlag;
2744    }
2745
2746    Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps,
2747                        2 + (InFlag.getNode() != 0));
2748    InFlag = Chain.getValue(1);
2749
2750    NodeTys.clear();
2751    NodeTys.push_back(MVT::Other);
2752    NodeTys.push_back(MVT::Glue);
2753    Ops.push_back(Chain);
2754    CallOpc = isSVR4ABI ? PPCISD::BCTRL_SVR4 : PPCISD::BCTRL_Darwin;
2755    Callee.setNode(0);
2756    // Add CTR register as callee so a bctr can be emitted later.
2757    if (isTailCall)
2758      Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT));
2759  }
2760
2761  // If this is a direct call, pass the chain and the callee.
2762  if (Callee.getNode()) {
2763    Ops.push_back(Chain);
2764    Ops.push_back(Callee);
2765  }
2766  // If this is a tail call add stack pointer delta.
2767  if (isTailCall)
2768    Ops.push_back(DAG.getConstant(SPDiff, MVT::i32));
2769
2770  // Add argument registers to the end of the list so that they are known live
2771  // into the call.
2772  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2773    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2774                                  RegsToPass[i].second.getValueType()));
2775
2776  return CallOpc;
2777}
2778
2779SDValue
2780PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2781                                   CallingConv::ID CallConv, bool isVarArg,
2782                                   const SmallVectorImpl<ISD::InputArg> &Ins,
2783                                   DebugLoc dl, SelectionDAG &DAG,
2784                                   SmallVectorImpl<SDValue> &InVals) const {
2785
2786  SmallVector<CCValAssign, 16> RVLocs;
2787  CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(),
2788                    getTargetMachine(), RVLocs, *DAG.getContext());
2789  CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC);
2790
2791  // Copy all of the result registers out of their specified physreg.
2792  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2793    CCValAssign &VA = RVLocs[i];
2794    EVT VT = VA.getValVT();
2795    assert(VA.isRegLoc() && "Can only return in registers!");
2796    Chain = DAG.getCopyFromReg(Chain, dl,
2797                               VA.getLocReg(), VT, InFlag).getValue(1);
2798    InVals.push_back(Chain.getValue(0));
2799    InFlag = Chain.getValue(2);
2800  }
2801
2802  return Chain;
2803}
2804
2805SDValue
2806PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
2807                              bool isTailCall, bool isVarArg,
2808                              SelectionDAG &DAG,
2809                              SmallVector<std::pair<unsigned, SDValue>, 8>
2810                                &RegsToPass,
2811                              SDValue InFlag, SDValue Chain,
2812                              SDValue &Callee,
2813                              int SPDiff, unsigned NumBytes,
2814                              const SmallVectorImpl<ISD::InputArg> &Ins,
2815                              SmallVectorImpl<SDValue> &InVals) const {
2816  std::vector<EVT> NodeTys;
2817  SmallVector<SDValue, 8> Ops;
2818  unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff,
2819                                 isTailCall, RegsToPass, Ops, NodeTys,
2820                                 PPCSubTarget);
2821
2822  // When performing tail call optimization the callee pops its arguments off
2823  // the stack. Account for this here so these bytes can be pushed back on in
2824  // PPCRegisterInfo::eliminateCallFramePseudoInstr.
2825  int BytesCalleePops =
2826    (CallConv == CallingConv::Fast &&
2827     getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;
2828
2829  // Add a register mask operand representing the call-preserved registers.
2830  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
2831  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
2832  assert(Mask && "Missing call preserved mask for calling convention");
2833  Ops.push_back(DAG.getRegisterMask(Mask));
2834
2835  if (InFlag.getNode())
2836    Ops.push_back(InFlag);
2837
2838  // Emit tail call.
2839  if (isTailCall) {
2840    // If this is the first return lowered for this function, add the regs
2841    // to the liveout set for the function.
2842    if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
2843      SmallVector<CCValAssign, 16> RVLocs;
2844      CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
2845                     getTargetMachine(), RVLocs, *DAG.getContext());
2846      CCInfo.AnalyzeCallResult(Ins, RetCC_PPC);
2847      for (unsigned i = 0; i != RVLocs.size(); ++i)
2848        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
2849    }
2850
2851    assert(((Callee.getOpcode() == ISD::Register &&
2852             cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
2853            Callee.getOpcode() == ISD::TargetExternalSymbol ||
2854            Callee.getOpcode() == ISD::TargetGlobalAddress ||
2855            isa<ConstantSDNode>(Callee)) &&
2856    "Expecting an global address, external symbol, absolute value or register");
2857
2858    return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Ops[0], Ops.size());
2859  }
2860
2861  // Add a NOP immediately after the branch instruction when using the 64-bit
2862  // SVR4 ABI. At link time, if caller and callee are in different modules and
2863  // thus have a different TOC, the call will be replaced with a call to a stub
2864  // function which saves the current TOC, loads the TOC of the callee and
2865  // branches to the callee. The NOP will be replaced with a load instruction
2866  // which restores the TOC of the caller from the TOC save slot of the current
2867  // stack frame. If caller and callee belong to the same module (and have the
2868  // same TOC), the NOP will remain unchanged.
2869
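  // A hedged sketch of that rewrite (the stub naming is linker-specific and
  // purely illustrative):
  //   bl callee        ->   bl callee$stub     # stub switches TOCs
  //   nop              ->   ld r2, 40(r1)      # reload the caller TOC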
2870  bool needsTOCRestore = false;
2871  if (!isTailCall && PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
2872    if (CallOpc == PPCISD::BCTRL_SVR4) {
2873      // This is a call through a function pointer.
2874      // Restore the caller TOC from the save area into R2.
2875      // See PrepareCall() for more information about calls through function
2876      // pointers in the 64-bit SVR4 ABI.
2877      // We are using a target-specific load with r2 hard coded, because the
2878      // result of a target-independent load would never go directly into r2,
2879      // since r2 is a reserved register (which prevents the register allocator
2880      // from allocating it), resulting in an additional register being
2881      // allocated and an unnecessary move instruction being generated.
2882      needsTOCRestore = true;
2883    } else if (CallOpc == PPCISD::CALL_SVR4) {
2884      // Otherwise insert NOP.
2885      CallOpc = PPCISD::CALL_NOP_SVR4;
2886    }
2887  }
2888
2889  Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
2890  InFlag = Chain.getValue(1);
2891
2892  if (needsTOCRestore) {
2893    SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
2894    Chain = DAG.getNode(PPCISD::TOC_RESTORE, dl, VTs, Chain, InFlag);
2895    InFlag = Chain.getValue(1);
2896  }
2897
2898  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
2899                             DAG.getIntPtrConstant(BytesCalleePops, true),
2900                             InFlag);
2901  if (!Ins.empty())
2902    InFlag = Chain.getValue(1);
2903
2904  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
2905                         Ins, dl, DAG, InVals);
2906}
2907
2908SDValue
2909PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2910                             SmallVectorImpl<SDValue> &InVals) const {
2911  SelectionDAG &DAG                     = CLI.DAG;
2912  DebugLoc &dl                          = CLI.DL;
2913  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2914  SmallVector<SDValue, 32> &OutVals     = CLI.OutVals;
2915  SmallVector<ISD::InputArg, 32> &Ins   = CLI.Ins;
2916  SDValue Chain                         = CLI.Chain;
2917  SDValue Callee                        = CLI.Callee;
2918  bool &isTailCall                      = CLI.IsTailCall;
2919  CallingConv::ID CallConv              = CLI.CallConv;
2920  bool isVarArg                         = CLI.IsVarArg;
2921
2922  if (isTailCall)
2923    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
2924                                                   Ins, DAG);
2925
2926  if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64())
2927    return LowerCall_SVR4(Chain, Callee, CallConv, isVarArg,
2928                          isTailCall, Outs, OutVals, Ins,
2929                          dl, DAG, InVals);
2930
2931  return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
2932                          isTailCall, Outs, OutVals, Ins,
2933                          dl, DAG, InVals);
2934}
2935
2936SDValue
2937PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
2938                                  CallingConv::ID CallConv, bool isVarArg,
2939                                  bool isTailCall,
2940                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
2941                                  const SmallVectorImpl<SDValue> &OutVals,
2942                                  const SmallVectorImpl<ISD::InputArg> &Ins,
2943                                  DebugLoc dl, SelectionDAG &DAG,
2944                                  SmallVectorImpl<SDValue> &InVals) const {
2945  // See PPCTargetLowering::LowerFormalArguments_SVR4() for a description
2946  // of the 32-bit SVR4 ABI stack frame layout.
2947
2948  assert((CallConv == CallingConv::C ||
2949          CallConv == CallingConv::Fast) && "Unknown calling convention!");
2950
2951  unsigned PtrByteSize = 4;
2952
2953  MachineFunction &MF = DAG.getMachineFunction();
2954
2955  // Mark this function as potentially containing a tail call. As a
2956  // consequence the frame pointer will be used for dynamic stack allocation
2957  // and for restoring the caller's stack pointer in this function's epilogue.
2958  // This is done because the tail-called function might overwrite the value
2959  // in this function's (MF) stack pointer slot at 0(SP).
2960  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
2961      CallConv == CallingConv::Fast)
2962    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
2963
2964  // Count how many bytes are to be pushed on the stack, including the linkage
2965  // area, parameter list area and the part of the local variable space which
2966  // contains copies of aggregates which are passed by value.
2967
2968  // Assign locations to all of the outgoing arguments.
2969  SmallVector<CCValAssign, 16> ArgLocs;
2970  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
2971                 getTargetMachine(), ArgLocs, *DAG.getContext());
2972
2973  // Reserve space for the linkage area on the stack.
2974  CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
2975
2976  if (isVarArg) {
2977    // Handle fixed and variable vector arguments differently.
2978    // Fixed vector arguments go into registers as long as registers are
2979    // available. Variable vector arguments always go into memory.
2980    unsigned NumArgs = Outs.size();
2981
2982    for (unsigned i = 0; i != NumArgs; ++i) {
2983      MVT ArgVT = Outs[i].VT;
2984      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
2985      bool Result;
2986
2987      if (Outs[i].IsFixed) {
2988        Result = CC_PPC_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
2989                             CCInfo);
2990      } else {
2991        Result = CC_PPC_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
2992                                    ArgFlags, CCInfo);
2993      }
2994
2995      if (Result) {
2996#ifndef NDEBUG
2997        errs() << "Call operand #" << i << " has unhandled type "
2998               << EVT(ArgVT).getEVTString() << "\n";
2999#endif
3000        llvm_unreachable(0);
3001      }
3002    }
3003  } else {
3004    // All arguments are treated the same.
3005    CCInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4);
3006  }
3007
3008  // Assign locations to all of the outgoing by-value aggregate arguments.
3009  SmallVector<CCValAssign, 16> ByValArgLocs;
3010  CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3011                      getTargetMachine(), ByValArgLocs, *DAG.getContext());
3012
3013  // Reserve stack space for the allocations in CCInfo.
3014  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
3015
3016  CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4_ByVal);
3017
3018  // Size of the linkage area, parameter list area, and the part of the local
3019  // variable space where copies of aggregates passed by value are stored.
3021  unsigned NumBytes = CCByValInfo.getNextStackOffset();
3022
3023  // Calculate by how many bytes the stack has to be adjusted in case of tail
3024  // call optimization.
3025  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
3026
3027  // Adjust the stack pointer for the new arguments...
3028  // These operations are automatically eliminated by the prolog/epilog pass
3029  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
3030  SDValue CallSeqStart = Chain;
3031
3032  // Load the return address and frame pointer so they can be moved somewhere
3033  // else later.
3034  SDValue LROp, FPOp;
3035  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false,
3036                                       dl);
3037
3038  // Set up a copy of the stack pointer for use loading and storing any
3039  // arguments that may not fit in the registers available for argument
3040  // passing.
3041  SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
3042
3043  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
3044  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
3045  SmallVector<SDValue, 8> MemOpChains;
3046
3047  bool seenFloatArg = false;
3048  // Walk the register/memloc assignments, inserting copies/loads.
3049  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
3052    CCValAssign &VA = ArgLocs[i];
3053    SDValue Arg = OutVals[i];
3054    ISD::ArgFlagsTy Flags = Outs[i].Flags;
3055
3056    if (Flags.isByVal()) {
3057      // Argument is an aggregate which is passed by value, thus we need to
3058      // create a copy of it in the local variable space of the current stack
3059      // frame (which is the stack frame of the caller) and pass the address of
3060      // this copy to the callee.
3061      assert((j < ByValArgLocs.size()) && "Index out of bounds!");
3062      CCValAssign &ByValVA = ByValArgLocs[j++];
3063      assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
3064
3065      // Memory reserved in the local variable space of the caller's stack frame.
3066      unsigned LocMemOffset = ByValVA.getLocMemOffset();
3067
3068      SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
3069      PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
3070
3071      // Create a copy of the argument in the local area of the current
3072      // stack frame.
3073      SDValue MemcpyCall =
3074        CreateCopyOfByValArgument(Arg, PtrOff,
3075                                  CallSeqStart.getNode()->getOperand(0),
3076                                  Flags, DAG, dl);
3077
3078      // This must go outside the CALLSEQ_START..END.
3079      SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
3080                           CallSeqStart.getNode()->getOperand(1));
3081      DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
3082                             NewCallSeqStart.getNode());
3083      Chain = CallSeqStart = NewCallSeqStart;
3084
3085      // Pass the address of the aggregate copy on the stack either in a
3086      // physical register or in the parameter list area of the current stack
3087      // frame to the callee.
3088      Arg = PtrOff;
3089    }
3090
3091    if (VA.isRegLoc()) {
3092      seenFloatArg |= VA.getLocVT().isFloatingPoint();
3093      // Put argument in a physical register.
3094      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3095    } else {
3096      // Put argument in the parameter list area of the current stack frame.
3097      assert(VA.isMemLoc());
3098      unsigned LocMemOffset = VA.getLocMemOffset();
3099
3100      if (!isTailCall) {
3101        SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
3102        PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
3103
3104        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
3105                                           MachinePointerInfo(),
3106                                           false, false, 0));
3107      } else {
3108        // Calculate and remember argument location.
3109        CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
3110                                 TailCallArguments);
3111      }
3112    }
3113  }
3114
3115  if (!MemOpChains.empty())
3116    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3117                        &MemOpChains[0], MemOpChains.size());
3118
3119  // Set CR6 to true if this is a vararg call with floating args passed in
3120  // registers.
3121  if (isVarArg) {
3122    SDValue SetCR(DAG.getMachineNode(seenFloatArg ? PPC::CRSET : PPC::CRUNSET,
3123                                     dl, MVT::i32), 0);
3124    RegsToPass.push_back(std::make_pair(unsigned(PPC::CR1EQ), SetCR));
3125  }
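  // For example (32-bit SVR4 convention; the calls are only illustrative):
  // for printf("%f", x) the caller sets CR bit 6 so the callee's varargs
  // prologue knows it must save the FP argument registers, while for
  // printf("%d", n) the bit is cleared and those saves can be skipped.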
3126
3127  // Build a sequence of copy-to-reg nodes chained together with token chain
3128  // and flag operands which copy the outgoing args into the appropriate regs.
3129  SDValue InFlag;
3130  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3131    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3132                             RegsToPass[i].second, InFlag);
3133    InFlag = Chain.getValue(1);
3134  }
3135
3136  if (isTailCall)
3137    PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
3138                    false, TailCallArguments);
3139
3140  return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
3141                    RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
3142                    Ins, InVals);
3143}
3144
3145SDValue
3146PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
3147                                    CallingConv::ID CallConv, bool isVarArg,
3148                                    bool isTailCall,
3149                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
3150                                    const SmallVectorImpl<SDValue> &OutVals,
3151                                    const SmallVectorImpl<ISD::InputArg> &Ins,
3152                                    DebugLoc dl, SelectionDAG &DAG,
3153                                    SmallVectorImpl<SDValue> &InVals) const {
3154
3155  unsigned NumOps  = Outs.size();
3156
3157  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3158  bool isPPC64 = PtrVT == MVT::i64;
3159  unsigned PtrByteSize = isPPC64 ? 8 : 4;
3160
3161  MachineFunction &MF = DAG.getMachineFunction();
3162
3163  // Mark this function as potentially containing a tail call. As a
3164  // consequence the frame pointer will be used for dynamic stack allocation
3165  // and for restoring the caller's stack pointer in this function's epilogue.
3166  // This is done because the tail-called function might overwrite the value
3167  // in this function's (MF) stack pointer slot at 0(SP).
3168  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
3169      CallConv == CallingConv::Fast)
3170    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
3171
3172  unsigned nAltivecParamsAtEnd = 0;
3173
3174  // Count how many bytes are to be pushed on the stack, including the linkage
3175  // area, and parameter passing area.  We start with 24/48 bytes, which is
3176  // prereserved space for [SP][CR][LR][3 x unused].
3177  unsigned NumBytes =
3178    CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isVarArg, CallConv,
3179                                         Outs, OutVals,
3180                                         nAltivecParamsAtEnd);
3181
3182  // Calculate by how many bytes the stack has to be adjusted in case of tail
3183  // call optimization.
3184  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
3185
3186  // To protect arguments on the stack from being clobbered in a tail call,
3187  // force all the loads to happen before doing any other lowering.
3188  if (isTailCall)
3189    Chain = DAG.getStackArgumentTokenFactor(Chain);
3190
3191  // Adjust the stack pointer for the new arguments...
3192  // These operations are automatically eliminated by the prolog/epilog pass
3193  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
3194  SDValue CallSeqStart = Chain;
3195
3196  // Load the return address and frame pointer so they can be moved somewhere
3197  // else later.
3198  SDValue LROp, FPOp;
3199  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
3200                                       dl);
3201
3202  // Set up a copy of the stack pointer for use loading and storing any
3203  // arguments that may not fit in the registers available for argument
3204  // passing.
3205  SDValue StackPtr;
3206  if (isPPC64)
3207    StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
3208  else
3209    StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
3210
3211  // Figure out which arguments are going to go in registers, and which in
3212  // memory.  Also, if this is a vararg function, floating point arguments
3213  // must be stored to our stack, and loaded into integer regs as well, if
3214  // any integer regs are available for argument passing.
3215  unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true);
3216  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3217
3218  static const uint16_t GPR_32[] = {           // 32-bit registers.
3219    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3220    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3221  };
3222  static const uint16_t GPR_64[] = {           // 64-bit registers.
3223    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3224    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3225  };
3226  static const uint16_t *FPR = GetFPR();
3227
3228  static const uint16_t VR[] = {
3229    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3230    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3231  };
3232  const unsigned NumGPRs = array_lengthof(GPR_32);
3233  const unsigned NumFPRs = 13;
3234  const unsigned NumVRs  = array_lengthof(VR);
3235
3236  const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32;
3237
3238  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
3239  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
3240
3241  SmallVector<SDValue, 8> MemOpChains;
3242  for (unsigned i = 0; i != NumOps; ++i) {
3243    SDValue Arg = OutVals[i];
3244    ISD::ArgFlagsTy Flags = Outs[i].Flags;
3245
3246    // PtrOff will be used to store the current argument to the stack if a
3247    // register cannot be found for it.
3248    SDValue PtrOff;
3249
3250    PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
3251
3252    PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
3253
3254    // On PPC64, promote integers to 64-bit values.
3255    if (isPPC64 && Arg.getValueType() == MVT::i32) {
3256      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
3257      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3258      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
3259    }
3260
3261    // FIXME memcpy is used way more than necessary.  Correctness first.
3262    if (Flags.isByVal()) {
3263      unsigned Size = Flags.getByValSize();
3264      if (Size==1 || Size==2) {
3265        // Very small objects are passed right-justified.
3266        // Everything else is passed left-justified.
3267        EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
3268        if (GPR_idx != NumGPRs) {
3269          SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
3270                                        MachinePointerInfo(), VT,
3271                                        false, false, 0);
3272          MemOpChains.push_back(Load.getValue(1));
3273          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
3274
3275          ArgOffset += PtrByteSize;
3276        } else {
3277          SDValue Const = DAG.getConstant(4 - Size, PtrOff.getValueType());
3278          SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
3279          SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr,
3280                                CallSeqStart.getNode()->getOperand(0),
3281                                Flags, DAG, dl);
3282          // This must go outside the CALLSEQ_START..END.
3283          SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
3284                               CallSeqStart.getNode()->getOperand(1));
3285          DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
3286                                 NewCallSeqStart.getNode());
3287          Chain = CallSeqStart = NewCallSeqStart;
3288          ArgOffset += PtrByteSize;
3289        }
3290        continue;
3291      }
3292      // Copy entire object into memory.  There are cases where gcc-generated
3293      // code assumes it is there, even if it could be put entirely into
3294      // registers.  (This is not what the doc says.)
3295      SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
3296                            CallSeqStart.getNode()->getOperand(0),
3297                            Flags, DAG, dl);
3298      // This must go outside the CALLSEQ_START..END.
3299      SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
3300                           CallSeqStart.getNode()->getOperand(1));
3301      DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), NewCallSeqStart.getNode());
3302      Chain = CallSeqStart = NewCallSeqStart;
3303      // And copy the pieces of it that fit into registers.
3304      for (unsigned j=0; j<Size; j+=PtrByteSize) {
3305        SDValue Const = DAG.getConstant(j, PtrOff.getValueType());
3306        SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
3307        if (GPR_idx != NumGPRs) {
3308          SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
3309                                     MachinePointerInfo(),
3310                                     false, false, false, 0);
3311          MemOpChains.push_back(Load.getValue(1));
3312          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
3313          ArgOffset += PtrByteSize;
3314        } else {
3315          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
3316          break;
3317        }
3318      }
3319      continue;
3320    }
3321
3322    switch (Arg.getValueType().getSimpleVT().SimpleTy) {
3323    default: llvm_unreachable("Unexpected ValueType for argument!");
3324    case MVT::i32:
3325    case MVT::i64:
3326      if (GPR_idx != NumGPRs) {
3327        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
3328      } else {
3329        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
3330                         isPPC64, isTailCall, false, MemOpChains,
3331                         TailCallArguments, dl);
3332      }
3333      ArgOffset += PtrByteSize;
3334      break;
3335    case MVT::f32:
3336    case MVT::f64:
3337      if (FPR_idx != NumFPRs) {
3338        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
3339
3340        if (isVarArg) {
3341          SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
3342                                       MachinePointerInfo(), false, false, 0);
3343          MemOpChains.push_back(Store);
3344
3345          // Float varargs are always shadowed in available integer registers
3346          if (GPR_idx != NumGPRs) {
3347            SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
3348                                       MachinePointerInfo(), false, false,
3349                                       false, 0);
3350            MemOpChains.push_back(Load.getValue(1));
3351            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
3352          }
3353          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
3354            SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
3355            PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
3356            SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
3357                                       MachinePointerInfo(),
3358                                       false, false, false, 0);
3359            MemOpChains.push_back(Load.getValue(1));
3360            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
3361          }
3362        } else {
3363          // If we have any FPRs remaining, we may also have GPRs remaining.
3364          // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
3365          // GPRs.
3366          if (GPR_idx != NumGPRs)
3367            ++GPR_idx;
3368          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
3369              !isPPC64)  // PPC64 has 64-bit GPR's obviously :)
3370            ++GPR_idx;
3371        }
3372      } else {
3373        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
3374                         isPPC64, isTailCall, false, MemOpChains,
3375                         TailCallArguments, dl);
3376      }
3377      if (isPPC64)
3378        ArgOffset += 8;
3379      else
3380        ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
3381      break;
3382    case MVT::v4f32:
3383    case MVT::v4i32:
3384    case MVT::v8i16:
3385    case MVT::v16i8:
3386      if (isVarArg) {
3387        // These go aligned on the stack, or in the corresponding R registers
3388        // when within range.  The Darwin PPC ABI doc claims they also go in
3389        // V registers; in fact gcc does this only for arguments that are
3390        // prototyped, not for those that match the ...  We do it for all
3391        // arguments, seems to work.
3392        while (ArgOffset % 16 != 0) {
3393          ArgOffset += PtrByteSize;
3394          if (GPR_idx != NumGPRs)
3395            GPR_idx++;
3396        }
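        // Worked example (illustrative): if ArgOffset were 52 on a 32-bit
        // target, the loop above pads it to 64 in three 4-byte steps,
        // consuming up to three shadow GPRs along the way.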
3397        // We could elide this store in the case where the object fits
3398        // entirely in R registers.  Maybe later.
3399        PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
3400                            DAG.getConstant(ArgOffset, PtrVT));
3401        SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
3402                                     MachinePointerInfo(), false, false, 0);
3403        MemOpChains.push_back(Store);
3404        if (VR_idx != NumVRs) {
3405          SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
3406                                     MachinePointerInfo(),
3407                                     false, false, false, 0);
3408          MemOpChains.push_back(Load.getValue(1));
3409          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
3410        }
3411        ArgOffset += 16;
3412        for (unsigned i=0; i<16; i+=PtrByteSize) {
3413          if (GPR_idx == NumGPRs)
3414            break;
3415          SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
3416                                  DAG.getConstant(i, PtrVT));
3417          SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
3418                                     false, false, false, 0);
3419          MemOpChains.push_back(Load.getValue(1));
3420          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
3421        }
3422        break;
3423      }
3424
3425      // Non-varargs Altivec params generally go in registers, but have
3426      // stack space allocated at the end.
3427      if (VR_idx != NumVRs) {
3428        // Doesn't have GPR space allocated.
3429        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
3430      } else if (nAltivecParamsAtEnd==0) {
3431        // We are emitting Altivec params in order.
3432        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
3433                         isPPC64, isTailCall, true, MemOpChains,
3434                         TailCallArguments, dl);
3435        ArgOffset += 16;
3436      }
3437      break;
3438    }
3439  }
3440  // If all Altivec parameters fit in registers, as they usually do,
3441  // they get stack space following the non-Altivec parameters.  We
3442  // don't track this here because nobody below needs it.
3443  // If there are more Altivec parameters than fit in registers emit
3444  // the stores here.
3445  if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
3446    unsigned j = 0;
3447    // Offset is aligned; skip 1st 12 params which go in V registers.
3448    ArgOffset = ((ArgOffset+15)/16)*16;
3449    ArgOffset += 12*16;
3450    for (unsigned i = 0; i != NumOps; ++i) {
3451      SDValue Arg = OutVals[i];
3452      EVT ArgType = Outs[i].VT;
3453      if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
3454          ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
3455        if (++j > NumVRs) {
3456          SDValue PtrOff;
3457          // We are emitting Altivec params in order.
3458          LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
3459                           isPPC64, isTailCall, true, MemOpChains,
3460                           TailCallArguments, dl);
3461          ArgOffset += 16;
3462        }
3463      }
3464    }
3465  }
3466
3467  if (!MemOpChains.empty())
3468    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3469                        &MemOpChains[0], MemOpChains.size());
3470
3471  // Check if this is an indirect call (MTCTR/BCTRL).
3472  // See PrepareCall() for more information about calls through function
3473  // pointers in the 64-bit SVR4 ABI.
3474  if (!isTailCall && isPPC64 && PPCSubTarget.isSVR4ABI() &&
3475      !isa<GlobalAddressSDNode>(Callee) &&
3476      !isa<ExternalSymbolSDNode>(Callee) &&
3477      !isBLACompatibleAddress(Callee, DAG)) {
3478    // Load r2 into a virtual register and store it to the TOC save area.
3479    SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
3480    // TOC save area offset.
3481    SDValue PtrOff = DAG.getIntPtrConstant(40);
3482    SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
3483    Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(),
3484                         false, false, 0);
3485  }
3486
3487  // On Darwin, R12 must contain the address of an indirect callee.  This does
3488  // not mean the MTCTR instruction must use R12; it's easier to model this as
3489  // an extra parameter, so do that.
3490  if (!isTailCall &&
3491      !isa<GlobalAddressSDNode>(Callee) &&
3492      !isa<ExternalSymbolSDNode>(Callee) &&
3493      !isBLACompatibleAddress(Callee, DAG))
3494    RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
3495                                                   PPC::R12), Callee));
3496
3497  // Build a sequence of copy-to-reg nodes chained together with token chain
3498  // and flag operands which copy the outgoing args into the appropriate regs.
3499  SDValue InFlag;
3500  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3501    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3502                             RegsToPass[i].second, InFlag);
3503    InFlag = Chain.getValue(1);
3504  }
3505
3506  if (isTailCall)
3507    PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
3508                    FPOp, true, TailCallArguments);
3509
3510  return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
3511                    RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
3512                    Ins, InVals);
3513}
3514
3515bool
3516PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
3517                                  MachineFunction &MF, bool isVarArg,
3518                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
3519                                  LLVMContext &Context) const {
3520  SmallVector<CCValAssign, 16> RVLocs;
3521  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
3522                 RVLocs, Context);
3523  return CCInfo.CheckReturn(Outs, RetCC_PPC);
3524}
3525
3526SDValue
3527PPCTargetLowering::LowerReturn(SDValue Chain,
3528                               CallingConv::ID CallConv, bool isVarArg,
3529                               const SmallVectorImpl<ISD::OutputArg> &Outs,
3530                               const SmallVectorImpl<SDValue> &OutVals,
3531                               DebugLoc dl, SelectionDAG &DAG) const {
3532
3533  SmallVector<CCValAssign, 16> RVLocs;
3534  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3535                 getTargetMachine(), RVLocs, *DAG.getContext());
3536  CCInfo.AnalyzeReturn(Outs, RetCC_PPC);
3537
3538  // If this is the first return lowered for this function, add the regs to the
3539  // liveout set for the function.
3540  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
3541    for (unsigned i = 0; i != RVLocs.size(); ++i)
3542      DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
3543  }
3544
3545  SDValue Flag;
3546
3547  // Copy the result values into the output registers.
3548  for (unsigned i = 0; i != RVLocs.size(); ++i) {
3549    CCValAssign &VA = RVLocs[i];
3550    assert(VA.isRegLoc() && "Can only return in registers!");
3551    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
3552                             OutVals[i], Flag);
3553    Flag = Chain.getValue(1);
3554  }
3555
3556  if (Flag.getNode())
3557    return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
3558  else
3559    return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain);
3560}
3561
3562SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
3563                                   const PPCSubtarget &Subtarget) const {
3564  // When we pop the dynamic allocation we need to restore the SP link.
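  // PPC frames keep a back chain at 0(SP) pointing to the caller's frame.
  // A rough sketch of the nodes built below (32-bit mnemonics; 64-bit mode
  // uses ld/std):
  //   lwz r0, 0(r1)    # load the old back-chain link
  //   mr  r1, SaveSP   # restore the stack pointer
  //   stw r0, 0(r1)    # re-establish the link at the restored SP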
3565  DebugLoc dl = Op.getDebugLoc();
3566
3567  // Get the correct type for pointers.
3568  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3569
3570  // Construct the stack pointer operand.
3571  bool isPPC64 = Subtarget.isPPC64();
3572  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
3573  SDValue StackPtr = DAG.getRegister(SP, PtrVT);
3574
3575  // Get the operands for the STACKRESTORE.
3576  SDValue Chain = Op.getOperand(0);
3577  SDValue SaveSP = Op.getOperand(1);
3578
3579  // Load the old link SP.
3580  SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
3581                                   MachinePointerInfo(),
3582                                   false, false, false, 0);
3583
3584  // Restore the stack pointer.
3585  Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
3586
3587  // Store the old link SP.
3588  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
3589                      false, false, 0);
3590}
3591
3594SDValue
3595PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
3596  MachineFunction &MF = DAG.getMachineFunction();
3597  bool isPPC64 = PPCSubTarget.isPPC64();
3598  bool isDarwinABI = PPCSubTarget.isDarwinABI();
3599  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3600
3601  // Get the current return address save index.  The users of this index
3602  // are primarily the tail-call lowering code.
3603  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
3604  int RASI = FI->getReturnAddrSaveIndex();
3605
3606  // If the return address save index hasn't been defined yet.
3607  if (!RASI) {
3608    // Find out the fixed offset of the return address save area.
3609    int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
3610    // Allocate the frame index for the return address save area.
3611    RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, true);
3612    // Save the result.
3613    FI->setReturnAddrSaveIndex(RASI);
3614  }
3615  return DAG.getFrameIndex(RASI, PtrVT);
3616}
3617
3618SDValue
3619PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
3620  MachineFunction &MF = DAG.getMachineFunction();
3621  bool isPPC64 = PPCSubTarget.isPPC64();
3622  bool isDarwinABI = PPCSubTarget.isDarwinABI();
3623  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3624
3625  // Get current frame pointer save index.  The users of this index will be
3626  // primarily DYNALLOC instructions.
3627  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
3628  int FPSI = FI->getFramePointerSaveIndex();
3629
3630  // If the frame pointer save index hasn't been defined yet.
3631  if (!FPSI) {
3632    // Find out what the fix offset of the frame pointer save area.
3633    int FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64,
3634                                                           isDarwinABI);
3635
3636    // Allocate the frame index for the frame pointer save area.
3637    FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
3638    // Save the result.
3639    FI->setFramePointerSaveIndex(FPSI);
3640  }
3641  return DAG.getFrameIndex(FPSI, PtrVT);
3642}
3643
3644SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
3645                                         SelectionDAG &DAG,
3646                                         const PPCSubtarget &Subtarget) const {
3647  // Get the inputs.
3648  SDValue Chain = Op.getOperand(0);
3649  SDValue Size  = Op.getOperand(1);
3650  DebugLoc dl = Op.getDebugLoc();
3651
3652  // Get the correct type for pointers.
3653  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3654  // Negate the size.
3655  SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
3656                                  DAG.getConstant(0, PtrVT), Size);
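  // E.g. a request for 48 bytes yields NegSize = -48; the DYNALLOC pseudo
  // built below is later expanded (in the register-info code) into a stack
  // update by that amount that also rewrites the back chain. This is a
  // sketch of intent, not the literal expansion.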
3657  // Construct a node for the frame pointer save index.
3658  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
3659  // Build a DYNALLOC node.
3660  SDValue Ops[3] = { Chain, NegSize, FPSIdx };
3661  SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
3662  return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops, 3);
3663}
3664
3665/// LowerSELECT_CC - Lower floating-point select_cc's into the fsel instruction
3666/// when possible.
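/// For example, "x < y ? a : b" has no direct fsel encoding, but because fsel
/// computes "d = (c >= 0.0) ? t : f" it can be rewritten, modulo NaN and
/// signed-zero caveats, as "fsel (x - y), b, a".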
3667SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
3668  // Not FP? Not a fsel.
3669  if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
3670      !Op.getOperand(2).getValueType().isFloatingPoint())
3671    return Op;
3672
3673  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
3674
3675  // Cannot handle SETEQ/SETNE.
3676  if (CC == ISD::SETEQ || CC == ISD::SETNE) return Op;
3677
3678  EVT ResVT = Op.getValueType();
3679  EVT CmpVT = Op.getOperand(0).getValueType();
3680  SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
3681  SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
3682  DebugLoc dl = Op.getDebugLoc();
3683
3684  // If the RHS of the comparison is a 0.0, we don't need to do the
3685  // subtraction at all.
3686  if (isFloatingPointZero(RHS))
3687    switch (CC) {
3688    default: break;       // SETUO etc aren't handled by fsel.
3689    case ISD::SETULT:
3690    case ISD::SETLT:
3691      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
3692    case ISD::SETOGE:
3693    case ISD::SETGE:
3694      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
3695        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
3696      return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
3697    case ISD::SETUGT:
3698    case ISD::SETGT:
3699      std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
3700    case ISD::SETOLE:
3701    case ISD::SETLE:
3702      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
3703        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
3704      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
3705                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
3706    }
3707
3708  SDValue Cmp;
3709  switch (CC) {
3710  default: break;       // SETUO etc aren't handled by fsel.
3711  case ISD::SETULT:
3712  case ISD::SETLT:
3713    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
3714    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
3715      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
3716    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
3717  case ISD::SETOGE:
3718  case ISD::SETGE:
3719    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
3720    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
3721      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
3722    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
3723  case ISD::SETUGT:
3724  case ISD::SETGT:
3725    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS);
3726    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
3727      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
3728    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
3729  case ISD::SETOLE:
3730  case ISD::SETLE:
3731    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS);
3732    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
3733      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
3734    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
3735  }
3736  return Op;
3737}
3738
3739// FIXME: Split this code up when LegalizeDAGTypes lands.
3740SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
3741                                           DebugLoc dl) const {
3742  assert(Op.getOperand(0).getValueType().isFloatingPoint());
3743  SDValue Src = Op.getOperand(0);
3744  if (Src.getValueType() == MVT::f32)
3745    Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
3746
3747  SDValue Tmp;
3748  switch (Op.getValueType().getSimpleVT().SimpleTy) {
3749  default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
3750  case MVT::i32:
3751    Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ :
3752                                                         PPCISD::FCTIDZ,
3753                      dl, MVT::f64, Src);
3754    break;
3755  case MVT::i64:
3756    Tmp = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Src);
3757    break;
3758  }
3759
3760  // Convert the FP value to an int value through memory.
3761  SDValue FIPtr = DAG.CreateStackTemporary(MVT::f64);
3762
3763  // Emit a store to the stack slot.
3764  SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr,
3765                               MachinePointerInfo(), false, false, 0);
3766
3767  // Result is a load from the stack slot.  If loading 4 bytes, make sure to
3768  // add in a bias.
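      // (fctiwz leaves its result in the low-order 32 bits of the f64 slot;
      // on this big-endian target that word is at byte offset 4.)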
3769  if (Op.getValueType() == MVT::i32)
3770    FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
3771                        DAG.getConstant(4, FIPtr.getValueType()));
3772  return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, MachinePointerInfo(),
3773                     false, false, false, 0);
3774}
3775
3776SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op,
3777                                           SelectionDAG &DAG) const {
3778  DebugLoc dl = Op.getDebugLoc();
3779  // Don't handle ppc_fp128 here; let it be lowered to a libcall.
3780  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
3781    return SDValue();
3782
3783  if (Op.getOperand(0).getValueType() == MVT::i64) {
3784    SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op.getOperand(0));
3785    SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits);
3786    if (Op.getValueType() == MVT::f32)
3787      FP = DAG.getNode(ISD::FP_ROUND, dl,
3788                       MVT::f32, FP, DAG.getIntPtrConstant(0));
3789    return FP;
3790  }
3791
3792  assert(Op.getOperand(0).getValueType() == MVT::i32 &&
3793         "Unhandled SINT_TO_FP type in custom expander!");
3794  // Since we only generate this in 64-bit mode, we can take advantage of
3795  // 64-bit registers.  In particular, sign extend the input value into a
3796  // 64-bit register with extsw, store the whole 64-bit value to the stack,
3797  // then lfd and fcfid it.
3798  MachineFunction &MF = DAG.getMachineFunction();
3799  MachineFrameInfo *FrameInfo = MF.getFrameInfo();
3800  int FrameIdx = FrameInfo->CreateStackObject(8, 8, false);
3801  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3802  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
3803
3804  SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, dl, MVT::i32,
3805                                Op.getOperand(0));
3806
3807  // STD the extended value into the stack slot.
3808  MachineMemOperand *MMO =
3809    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
3810                            MachineMemOperand::MOStore, 8, 8);
3811  SDValue Ops[] = { DAG.getEntryNode(), Ext64, FIdx };
3812  SDValue Store =
3813    DAG.getMemIntrinsicNode(PPCISD::STD_32, dl, DAG.getVTList(MVT::Other),
3814                            Ops, 3, MVT::i64, MMO);
3815  // Load the value as a double.
3816  SDValue Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, MachinePointerInfo(),
3817                           false, false, false, 0);
3818
3819  // FCFID it and return it.
3820  SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Ld);
3821  if (Op.getValueType() == MVT::f32)
3822    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0));
3823  return FP;
3824}
3825
3826SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
3827                                            SelectionDAG &DAG) const {
3828  DebugLoc dl = Op.getDebugLoc();
3829  /*
3830   The rounding mode is in bits 30:31 of the FPSCR, and has the following
3831   settings:
3832     00 Round to nearest
3833     01 Round to 0
3834     10 Round to +inf
3835     11 Round to -inf
3836
3837  FLT_ROUNDS, on the other hand, expects the following:
3838    -1 Undefined
3839     0 Round to 0
3840     1 Round to nearest
3841     2 Round to +inf
3842     3 Round to -inf
3843
3844  To perform the conversion, we do:
3845    ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
3846  */
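      // Checking the formula on all four encodings: 00 -> (0 ^ (3 >> 1)) = 1,
      // 01 -> (1 ^ (2 >> 1)) = 0, 10 -> (2 ^ (1 >> 1)) = 2, and
      // 11 -> (3 ^ (0 >> 1)) = 3, as required.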
3847
3848  MachineFunction &MF = DAG.getMachineFunction();
3849  EVT VT = Op.getValueType();
3850  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3851  std::vector<EVT> NodeTys;
3852  SDValue InFlag;
3853
3854  // Save FP Control Word to register
3855  NodeTys.push_back(MVT::f64);    // return register
3856  NodeTys.push_back(MVT::Glue);   // unused in this context
3857  SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0);
3858
3859  // Save FP register to stack slot
3860  int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
3861  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
3862  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
3863                               StackSlot, MachinePointerInfo(), false, false,0);
3864
3865  // Load FP Control Word from low 32 bits of stack slot.
3866  SDValue Four = DAG.getConstant(4, PtrVT);
3867  SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
3868  SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
3869                            false, false, false, 0);
3870
3871  // Transform as necessary
3872  SDValue CWD1 =
3873    DAG.getNode(ISD::AND, dl, MVT::i32,
3874                CWD, DAG.getConstant(3, MVT::i32));
3875  SDValue CWD2 =
3876    DAG.getNode(ISD::SRL, dl, MVT::i32,
3877                DAG.getNode(ISD::AND, dl, MVT::i32,
3878                            DAG.getNode(ISD::XOR, dl, MVT::i32,
3879                                        CWD, DAG.getConstant(3, MVT::i32)),
3880                            DAG.getConstant(3, MVT::i32)),
3881                DAG.getConstant(1, MVT::i32));
3882
3883  SDValue RetVal =
3884    DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
3885
3886  return DAG.getNode((VT.getSizeInBits() < 16 ?
3887                      ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
3888}
3889
3890SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
3891  EVT VT = Op.getValueType();
3892  unsigned BitWidth = VT.getSizeInBits();
3893  DebugLoc dl = Op.getDebugLoc();
3894  assert(Op.getNumOperands() == 3 &&
3895         VT == Op.getOperand(1).getValueType() &&
3896         "Unexpected SHL!");
3897
3898  // Expand into a bunch of logical ops.  Note that these ops
3899  // depend on the PPC behavior for oversized shift amounts.
3900  SDValue Lo = Op.getOperand(0);
3901  SDValue Hi = Op.getOperand(1);
3902  SDValue Amt = Op.getOperand(2);
3903  EVT AmtVT = Amt.getValueType();
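      // For Amt in [1, BitWidth), OutHi = (Hi << Amt) | (Lo >> (BitWidth-Amt))
      // and Tmp6 is 0; for Amt in [BitWidth, 2*BitWidth), Tmp2 and Tmp3 are 0
      // and OutHi = Lo << (Amt-BitWidth), with OutLo = Lo << Amt = 0.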
3904
3905  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
3906                             DAG.getConstant(BitWidth, AmtVT), Amt);
3907  SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
3908  SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
3909  SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
3910  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
3911                             DAG.getConstant(-BitWidth, AmtVT));
3912  SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
3913  SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
3914  SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
3915  SDValue OutOps[] = { OutLo, OutHi };
3916  return DAG.getMergeValues(OutOps, 2, dl);
3917}
3918
3919SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
3920  EVT VT = Op.getValueType();
3921  DebugLoc dl = Op.getDebugLoc();
3922  unsigned BitWidth = VT.getSizeInBits();
3923  assert(Op.getNumOperands() == 3 &&
3924         VT == Op.getOperand(1).getValueType() &&
3925         "Unexpected SRL!");
3926
3927  // Expand into a bunch of logical ops.  Note that these ops
3928  // depend on the PPC behavior for oversized shift amounts.
3929  SDValue Lo = Op.getOperand(0);
3930  SDValue Hi = Op.getOperand(1);
3931  SDValue Amt = Op.getOperand(2);
3932  EVT AmtVT = Amt.getValueType();
3933
3934  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
3935                             DAG.getConstant(BitWidth, AmtVT), Amt);
3936  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
3937  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
3938  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
3939  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
3940                             DAG.getConstant(-BitWidth, AmtVT));
3941  SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
3942  SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
3943  SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
3944  SDValue OutOps[] = { OutLo, OutHi };
3945  return DAG.getMergeValues(OutOps, 2, dl);
3946}
3947
3948SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
3949  DebugLoc dl = Op.getDebugLoc();
3950  EVT VT = Op.getValueType();
3951  unsigned BitWidth = VT.getSizeInBits();
3952  assert(Op.getNumOperands() == 3 &&
3953         VT == Op.getOperand(1).getValueType() &&
3954         "Unexpected SRA!");
3955
3956  // Expand into a bunch of logical ops, followed by a select_cc.
3957  SDValue Lo = Op.getOperand(0);
3958  SDValue Hi = Op.getOperand(1);
3959  SDValue Amt = Op.getOperand(2);
3960  EVT AmtVT = Amt.getValueType();
3961
3962  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
3963                             DAG.getConstant(BitWidth, AmtVT), Amt);
3964  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
3965  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
3966  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
3967  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
3968                             DAG.getConstant(-BitWidth, AmtVT));
3969  SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
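      // Tmp5 = Amt - BitWidth is <= 0 whenever Amt <= BitWidth, so the select
      // below picks Tmp4; for larger amounts it picks Tmp6, which shifts the
      // sign-filled Hi word into the low part.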
3970  SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
3971  SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT),
3972                                  Tmp4, Tmp6, ISD::SETLE);
3973  SDValue OutOps[] = { OutLo, OutHi };
3974  return DAG.getMergeValues(OutOps, 2, dl);
3975}
3976
3977//===----------------------------------------------------------------------===//
3978// Vector related lowering.
3979//
3980
3981/// BuildSplatI - Build a canonical splati of Val with an element size of
3982/// SplatSize.  Cast the result to VT.
3983static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
3984                             SelectionDAG &DAG, DebugLoc dl) {
3985  assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
3986
3987  static const EVT VTys[] = { // canonical VT to use for each size.
3988    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
3989  };
3990
3991  EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
3992
3993  // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
3994  if (Val == -1)
3995    SplatSize = 1;
3996
3997  EVT CanonicalVT = VTys[SplatSize-1];
3998
3999  // Build a canonical splat for this value.
4000  SDValue Elt = DAG.getConstant(Val, MVT::i32);
4001  SmallVector<SDValue, 8> Ops;
4002  Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
4003  SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT,
4004                              &Ops[0], Ops.size());
4005  return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res);
4006}
4007
4008/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
4009/// specified intrinsic ID.
4010static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
4011                                SelectionDAG &DAG, DebugLoc dl,
4012                                EVT DestVT = MVT::Other) {
4013  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
4014  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
4015                     DAG.getConstant(IID, MVT::i32), LHS, RHS);
4016}
4017
4018/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
4019/// specified intrinsic ID.
4020static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
4021                                SDValue Op2, SelectionDAG &DAG,
4022                                DebugLoc dl, EVT DestVT = MVT::Other) {
4023  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
4024  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
4025                     DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
4026}
4027
4028
4029/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
4030/// amount.  The result has the specified value type.
4031static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
4032                             EVT VT, SelectionDAG &DAG, DebugLoc dl) {
4033  // Force LHS/RHS to be the right type.
4034  LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
4035  RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
4036
4037  int Ops[16];
4038  for (unsigned i = 0; i != 16; ++i)
4039    Ops[i] = i + Amt;
4040  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
4041  return DAG.getNode(ISD::BITCAST, dl, VT, T);
4042}
4043
4044// If this is a case we can't handle, return null and let the default
4045// expansion code take care of it.  If we CAN select this case, and if it
4046// selects to a single instruction, return Op.  Otherwise, if we can codegen
4047// this case more efficiently than a constant pool load, lower it to the
4048// sequence of ops that should be used.
4049SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
4050                                             SelectionDAG &DAG) const {
4051  DebugLoc dl = Op.getDebugLoc();
4052  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
4053  assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
4054
4055  // Check if this is a splat of a constant value.
4056  APInt APSplatBits, APSplatUndef;
4057  unsigned SplatBitSize;
4058  bool HasAnyUndefs;
4059  if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
4060                             HasAnyUndefs, 0, true) || SplatBitSize > 32)
4061    return SDValue();
4062
4063  unsigned SplatBits = APSplatBits.getZExtValue();
4064  unsigned SplatUndef = APSplatUndef.getZExtValue();
4065  unsigned SplatSize = SplatBitSize / 8;
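      // SplatSize is the splat element size in bytes: 1 (vspltisb),
      // 2 (vspltish) or 4 (vspltisw).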
4066
4067  // First, handle single instruction cases.
4068
4069  // All zeros?
4070  if (SplatBits == 0) {
4071    // Canonicalize all zero vectors to be v4i32.
4072    if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
4073      SDValue Z = DAG.getConstant(0, MVT::i32);
4074      Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
4075      Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
4076    }
4077    return Op;
4078  }
4079
4080  // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
4081  int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
4082                    (32-SplatBitSize));
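      // For example, SplatBits = 0xFFFE with SplatBitSize = 16 sign-extends
      // to SextVal = -2.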
4083  if (SextVal >= -16 && SextVal <= 15)
4084    return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
4085
4086
4087  // Two instruction sequences.
4088
4089  // If this value is in the range [-32,30] and is even, use:
4090  //    tmp = VSPLTI[bhw], result = add tmp, tmp
4091  if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
4092    SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG, dl);
4093    Res = DAG.getNode(ISD::ADD, dl, Res.getValueType(), Res, Res);
4094    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
4095  }
4096
4097  // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
4098  // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
4099  // for fneg/fabs.
4100  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
4101    // Make -1 and vspltisw -1:
4102    SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
4103
4104    // Make the VSLW intrinsic, computing 0x8000_0000.
4105    SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
4106                                   OnesV, DAG, dl);
4107
4108    // xor by OnesV to invert it.
4109    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
4110    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
4111  }
4112
4113  // Check to see if this is one of the many 'vsplti* + binop self' cases.
4114  static const signed char SplatCsts[] = {
4115    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
4116    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
4117  };
4118
4119  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
4120    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
4121    // cases which are ambiguous (e.g. formation of 0x8000_0000).
4122    int i = SplatCsts[idx];
4123
4124    // Figure out what shift amount will be used by altivec if shifted by i in
4125    // this splat size.
4126    unsigned TypeShiftAmt = i & (SplatBitSize-1);
4127
4128    // vsplti + shl self.
4129    if (SextVal == (i << (int)TypeShiftAmt)) {
4130      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
4131      static const unsigned IIDs[] = { // Intrinsic to use for each size.
4132        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
4133        Intrinsic::ppc_altivec_vslw
4134      };
4135      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
4136      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
4137    }
4138
4139    // vsplti + srl self.
4140    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
4141      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
4142      static const unsigned IIDs[] = { // Intrinsic to use for each size.
4143        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
4144        Intrinsic::ppc_altivec_vsrw
4145      };
4146      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
4147      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
4148    }
4149
4150    // vsplti + sra self.
4151    if (SextVal == (int)(i >> TypeShiftAmt)) {
4152      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
4153      static const unsigned IIDs[] = { // Intrinsic to use for each size.
4154        Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
4155        Intrinsic::ppc_altivec_vsraw
4156      };
4157      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
4158      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
4159    }
4160
4161    // vsplti + rol self.
4162    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
4163                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
4164      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
4165      static const unsigned IIDs[] = { // Intrinsic to use for each size.
4166        Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
4167        Intrinsic::ppc_altivec_vrlw
4168      };
4169      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
4170      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
4171    }
4172
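        // The vsldoi checks below match constants whose bit pattern is the
        // splat of i rotated left by 1-3 bytes, which is what vsldoi t,t,N
        // produces when t is a splat of i; e.g. for i = -2 and N = 1,
        // (i << 8) | 0xFF gives the word pattern 0xFFFFFEFF.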
4173    // t = vsplti c, result = vsldoi t, t, 1
4174    if (SextVal == ((i << 8) | (i < 0 ? 0xFF : 0))) {
4175      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
4176      return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl);
4177    }
4178    // t = vsplti c, result = vsldoi t, t, 2
4179    if (SextVal == ((i << 16) | (i < 0 ? 0xFFFF : 0))) {
4180      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
4181      return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl);
4182    }
4183    // t = vsplti c, result = vsldoi t, t, 3
4184    if (SextVal == ((i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
4185      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
4186      return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl);
4187    }
4188  }
4189
4190  // Three instruction sequences.
4191
4192  // Odd, in range [17,31]:  (vsplti C)-(vsplti -16).
4193  if (SextVal >= 0 && SextVal <= 31) {
4194    SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG, dl);
4195    SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
4196    LHS = DAG.getNode(ISD::SUB, dl, LHS.getValueType(), LHS, RHS);
4197    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), LHS);
4198  }
4199  // Odd, in range [-31,-17]:  (vsplti C)+(vsplti -16).
4200  if (SextVal >= -31 && SextVal <= 0) {
4201    SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG, dl);
4202    SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
4203    LHS = DAG.getNode(ISD::ADD, dl, LHS.getValueType(), LHS, RHS);
4204    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), LHS);
4205  }
4206
4207  return SDValue();
4208}
4209
4210/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
4211/// the specified operations to build the shuffle.
4212static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
4213                                      SDValue RHS, SelectionDAG &DAG,
4214                                      DebugLoc dl) {
4215  unsigned OpNum = (PFEntry >> 26) & 0x0F;
4216  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
4217  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
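      // PFEntry layout: bits 31:30 hold the cost, bits 29:26 the opcode
      // (OpNum), bits 25:13 the LHS element id and bits 12:0 the RHS id.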
4218
4219  enum {
4220    OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
4221    OP_VMRGHW,
4222    OP_VMRGLW,
4223    OP_VSPLTISW0,
4224    OP_VSPLTISW1,
4225    OP_VSPLTISW2,
4226    OP_VSPLTISW3,
4227    OP_VSLDOI4,
4228    OP_VSLDOI8,
4229    OP_VSLDOI12
4230  };
4231
4232  if (OpNum == OP_COPY) {
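      // Element ids encode four 32-bit word selectors <a,b,c,d> in base 9:
      // digits 0-3 pick words of LHS, 4-7 pick words of RHS, and 8 is undef.
      // <0,1,2,3> is LHS itself and <4,5,6,7> is RHS.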
4233    if (LHSID == (1*9+2)*9+3) return LHS;
4234    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
4235    return RHS;
4236  }
4237
4238  SDValue OpLHS, OpRHS;
4239  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
4240  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
4241
4242  int ShufIdxs[16];
4243  switch (OpNum) {
4244  default: llvm_unreachable("Unknown i32 permute!");
4245  case OP_VMRGHW:
4246    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
4247    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
4248    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
4249    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
4250    break;
4251  case OP_VMRGLW:
4252    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
4253    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
4254    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
4255    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
4256    break;
4257  case OP_VSPLTISW0:
4258    for (unsigned i = 0; i != 16; ++i)
4259      ShufIdxs[i] = (i&3)+0;
4260    break;
4261  case OP_VSPLTISW1:
4262    for (unsigned i = 0; i != 16; ++i)
4263      ShufIdxs[i] = (i&3)+4;
4264    break;
4265  case OP_VSPLTISW2:
4266    for (unsigned i = 0; i != 16; ++i)
4267      ShufIdxs[i] = (i&3)+8;
4268    break;
4269  case OP_VSPLTISW3:
4270    for (unsigned i = 0; i != 16; ++i)
4271      ShufIdxs[i] = (i&3)+12;
4272    break;
4273  case OP_VSLDOI4:
4274    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
4275  case OP_VSLDOI8:
4276    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
4277  case OP_VSLDOI12:
4278    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
4279  }
4280  EVT VT = OpLHS.getValueType();
4281  OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
4282  OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
4283  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
4284  return DAG.getNode(ISD::BITCAST, dl, VT, T);
4285}
4286
4287/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
4288/// is a shuffle we can handle in a single instruction, return it.  Otherwise,
4289/// return the code it can be lowered into.  Worst case, it can always be
4290/// lowered into a vperm.
4291SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
4292                                               SelectionDAG &DAG) const {
4293  DebugLoc dl = Op.getDebugLoc();
4294  SDValue V1 = Op.getOperand(0);
4295  SDValue V2 = Op.getOperand(1);
4296  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
4297  EVT VT = Op.getValueType();
4298
4299  // Cases that are handled by instructions that take permute immediates
4300  // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
4301  // selected by the instruction selector.
4302  if (V2.getOpcode() == ISD::UNDEF) {
4303    if (PPC::isSplatShuffleMask(SVOp, 1) ||
4304        PPC::isSplatShuffleMask(SVOp, 2) ||
4305        PPC::isSplatShuffleMask(SVOp, 4) ||
4306        PPC::isVPKUWUMShuffleMask(SVOp, true) ||
4307        PPC::isVPKUHUMShuffleMask(SVOp, true) ||
4308        PPC::isVSLDOIShuffleMask(SVOp, true) != -1 ||
4309        PPC::isVMRGLShuffleMask(SVOp, 1, true) ||
4310        PPC::isVMRGLShuffleMask(SVOp, 2, true) ||
4311        PPC::isVMRGLShuffleMask(SVOp, 4, true) ||
4312        PPC::isVMRGHShuffleMask(SVOp, 1, true) ||
4313        PPC::isVMRGHShuffleMask(SVOp, 2, true) ||
4314        PPC::isVMRGHShuffleMask(SVOp, 4, true)) {
4315      return Op;
4316    }
4317  }
4318
4319  // Altivec has a variety of "shuffle immediates" that take two vector inputs
4320  // and produce a fixed permutation.  If any of these match, do not lower to
4321  // VPERM.
4322  if (PPC::isVPKUWUMShuffleMask(SVOp, false) ||
4323      PPC::isVPKUHUMShuffleMask(SVOp, false) ||
4324      PPC::isVSLDOIShuffleMask(SVOp, false) != -1 ||
4325      PPC::isVMRGLShuffleMask(SVOp, 1, false) ||
4326      PPC::isVMRGLShuffleMask(SVOp, 2, false) ||
4327      PPC::isVMRGLShuffleMask(SVOp, 4, false) ||
4328      PPC::isVMRGHShuffleMask(SVOp, 1, false) ||
4329      PPC::isVMRGHShuffleMask(SVOp, 2, false) ||
4330      PPC::isVMRGHShuffleMask(SVOp, 4, false))
4331    return Op;
4332
4333  // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
4334  // perfect shuffle table to emit an optimal matching sequence.
4335  ArrayRef<int> PermMask = SVOp->getMask();
4336
4337  unsigned PFIndexes[4];
4338  bool isFourElementShuffle = true;
4339  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
4340    unsigned EltNo = 8;   // Start out undef.
4341    for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
4342      if (PermMask[i*4+j] < 0)
4343        continue;   // Undef, ignore it.
4344
4345      unsigned ByteSource = PermMask[i*4+j];
4346      if ((ByteSource & 3) != j) {
4347        isFourElementShuffle = false;
4348        break;
4349      }
4350
4351      if (EltNo == 8) {
4352        EltNo = ByteSource/4;
4353      } else if (EltNo != ByteSource/4) {
4354        isFourElementShuffle = false;
4355        break;
4356      }
4357    }
4358    PFIndexes[i] = EltNo;
4359  }
4360
4361  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
4362  // perfect shuffle vector to determine if it is cost effective to do this as
4363  // discrete instructions, or whether we should use a vperm.
4364  if (isFourElementShuffle) {
4365    // Compute the index in the perfect shuffle table.
4366    unsigned PFTableIndex =
4367      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
4368
4369    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
4370    unsigned Cost  = (PFEntry >> 30);
4371
4372    // Determining when to avoid vperm is tricky.  Many things affect the cost
4373    // of vperm, particularly how many times the perm mask needs to be computed.
4374    // For example, if the perm mask can be hoisted out of a loop or is already
4375    // used (perhaps because there are multiple permutes with the same shuffle
4376    // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
4377    // the loop requires an extra register.
4378    //
4379    // As a compromise, we only emit discrete instructions if the shuffle can be
4380    // generated in 3 or fewer operations.  When we have loop information
4381    // available, if this block is within a loop, we should avoid using vperm
4382    // for 3-operation perms and use a constant pool load instead.
4383    if (Cost < 3)
4384      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
4385  }
4386
4387  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
4388  // vector that will get spilled to the constant pool.
4389  if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
4390
4391  // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
4392  // that it is in input element units, not in bytes.  Convert now.
4393  EVT EltVT = V1.getValueType().getVectorElementType();
4394  unsigned BytesPerElement = EltVT.getSizeInBits()/8;
4395
4396  SmallVector<SDValue, 16> ResultMask;
4397  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
4398    unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
4399
4400    for (unsigned j = 0; j != BytesPerElement; ++j)
4401      ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
4402                                           MVT::i32));
4403  }
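      // Each byte of the vperm control vector selects one byte of the 32-byte
      // concatenation (V1, V2); the loop above rescaled the element mask to
      // byte granularity.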
4404
4405  SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
4406                                    &ResultMask[0], ResultMask.size());
4407  return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask);
4408}
4409
4410/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
4411/// altivec comparison.  If it is, return true and fill in Opc/isDot with
4412/// information about the intrinsic.
4413static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc,
4414                                  bool &isDot) {
4415  unsigned IntrinsicID =
4416    cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
4417  CompareOpc = -1;
4418  isDot = false;
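      // The CompareOpc values below are the extended-opcode fields of the
      // matching vcmp* instructions; the *_p intrinsics select the record
      // forms, which also set CR6.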
4419  switch (IntrinsicID) {
4420  default: return false;
4421    // Comparison predicates.
4422  case Intrinsic::ppc_altivec_vcmpbfp_p:  CompareOpc = 966; isDot = 1; break;
4423  case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
4424  case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc =   6; isDot = 1; break;
4425  case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc =  70; isDot = 1; break;
4426  case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break;
4427  case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break;
4428  case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break;
4429  case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break;
4430  case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break;
4431  case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break;
4432  case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
4433  case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
4434  case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;
4435
4436    // Normal Comparisons.
4437  case Intrinsic::ppc_altivec_vcmpbfp:    CompareOpc = 966; isDot = 0; break;
4438  case Intrinsic::ppc_altivec_vcmpeqfp:   CompareOpc = 198; isDot = 0; break;
4439  case Intrinsic::ppc_altivec_vcmpequb:   CompareOpc =   6; isDot = 0; break;
4440  case Intrinsic::ppc_altivec_vcmpequh:   CompareOpc =  70; isDot = 0; break;
4441  case Intrinsic::ppc_altivec_vcmpequw:   CompareOpc = 134; isDot = 0; break;
4442  case Intrinsic::ppc_altivec_vcmpgefp:   CompareOpc = 454; isDot = 0; break;
4443  case Intrinsic::ppc_altivec_vcmpgtfp:   CompareOpc = 710; isDot = 0; break;
4444  case Intrinsic::ppc_altivec_vcmpgtsb:   CompareOpc = 774; isDot = 0; break;
4445  case Intrinsic::ppc_altivec_vcmpgtsh:   CompareOpc = 838; isDot = 0; break;
4446  case Intrinsic::ppc_altivec_vcmpgtsw:   CompareOpc = 902; isDot = 0; break;
4447  case Intrinsic::ppc_altivec_vcmpgtub:   CompareOpc = 518; isDot = 0; break;
4448  case Intrinsic::ppc_altivec_vcmpgtuh:   CompareOpc = 582; isDot = 0; break;
4449  case Intrinsic::ppc_altivec_vcmpgtuw:   CompareOpc = 646; isDot = 0; break;
4450  }
4451  return true;
4452}
4453
4454/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
4455/// lower, do it, otherwise return null.
4456SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4457                                                   SelectionDAG &DAG) const {
4458  // If this is a lowered altivec predicate compare, CompareOpc is set to the
4459  // opcode number of the comparison.
4460  DebugLoc dl = Op.getDebugLoc();
4461  int CompareOpc;
4462  bool isDot;
4463  if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
4464    return SDValue();    // Don't custom lower most intrinsics.
4465
4466  // If this is a non-dot comparison, make the VCMP node and we are done.
4467  if (!isDot) {
4468    SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
4469                              Op.getOperand(1), Op.getOperand(2),
4470                              DAG.getConstant(CompareOpc, MVT::i32));
4471    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
4472  }
4473
4474  // Create the PPCISD altivec 'dot' comparison node.
4475  SDValue Ops[] = {
4476    Op.getOperand(2),  // LHS
4477    Op.getOperand(3),  // RHS
4478    DAG.getConstant(CompareOpc, MVT::i32)
4479  };
4480  std::vector<EVT> VTs;
4481  VTs.push_back(Op.getOperand(2).getValueType());
4482  VTs.push_back(MVT::Glue);
4483  SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
4484
4485  // Now that we have the comparison, emit a copy from the CR to a GPR.
4486  // This is flagged to the above dot comparison.
4487  SDValue Flags = DAG.getNode(PPCISD::MFCR, dl, MVT::i32,
4488                                DAG.getRegister(PPC::CR6, MVT::i32),
4489                                CompNode.getValue(1));
4490
4491  // Unpack the result based on how the target uses it.
4492  unsigned BitNo;   // Bit # of CR6.
4493  bool InvertBit;   // Invert result?
4494  switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
4495  default:  // Can't happen, don't crash on invalid number though.
4496  case 0:   // Return the value of the EQ bit of CR6.
4497    BitNo = 0; InvertBit = false;
4498    break;
4499  case 1:   // Return the inverted value of the EQ bit of CR6.
4500    BitNo = 0; InvertBit = true;
4501    break;
4502  case 2:   // Return the value of the LT bit of CR6.
4503    BitNo = 2; InvertBit = false;
4504    break;
4505  case 3:   // Return the inverted value of the LT bit of CR6.
4506    BitNo = 2; InvertBit = true;
4507    break;
4508  }
4509
4510  // Shift the bit into the low position.
4511  Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
4512                      DAG.getConstant(8-(3-BitNo), MVT::i32));
4513  // Isolate the bit.
4514  Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
4515                      DAG.getConstant(1, MVT::i32));
4516
4517  // If we are supposed to, toggle the bit.
4518  if (InvertBit)
4519    Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
4520                        DAG.getConstant(1, MVT::i32));
4521  return Flags;
4522}
4523
4524SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
4525                                                   SelectionDAG &DAG) const {
4526  DebugLoc dl = Op.getDebugLoc();
4527  // Create a stack slot that is 16-byte aligned.
4528  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
4529  int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
4530  EVT PtrVT = getPointerTy();
4531  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
4532
4533  // Store the input value into Value#0 of the stack slot.
4534  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
4535                               Op.getOperand(0), FIdx, MachinePointerInfo(),
4536                               false, false, 0);
4537  // Load it out.
4538  return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(),
4539                     false, false, false, 0);
4540}
4541
4542SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
4543  DebugLoc dl = Op.getDebugLoc();
4544  if (Op.getValueType() == MVT::v4i32) {
4545    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
4546
4547    SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
4548    SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt.
4549
4550    SDValue RHSSwap =   // = vrlw RHS, 16
4551      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
4552
4553    // Shrinkify inputs to v8i16.
4554    LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
4555    RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
4556    RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
4557
4558    // Low parts multiplied together, generating 32-bit results (we ignore the
4559    // top parts).
4560    SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
4561                                        LHS, RHS, DAG, dl, MVT::v4i32);
4562
4563    SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
4564                                      LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
4565    // Shift the high parts up 16 bits.
4566    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
4567                              Neg16, DAG, dl);
4568    return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
4569  } else if (Op.getValueType() == MVT::v8i16) {
4570    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
4571
4572    SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);
4573
4574    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
4575                            LHS, RHS, Zero, DAG, dl);
4576  } else if (Op.getValueType() == MVT::v16i8) {
4577    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
4578
4579    // Multiply the even 8-bit parts, producing 16-bit sums.
4580    SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
4581                                           LHS, RHS, DAG, dl, MVT::v8i16);
4582    EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
4583
4584    // Multiply the odd 8-bit parts, producing 16-bit sums.
4585    SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
4586                                          LHS, RHS, DAG, dl, MVT::v8i16);
4587    OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
4588
4589    // Merge the results together.
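        // In big-endian order the low byte of each 16-bit product is at the
        // odd byte index, so take bytes 2*i+1 of EvenParts and 2*i+1+16 of
        // OddParts to interleave the truncated 8-bit products.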
4590    int Ops[16];
4591    for (unsigned i = 0; i != 8; ++i) {
4592      Ops[i*2  ] = 2*i+1;
4593      Ops[i*2+1] = 2*i+1+16;
4594    }
4595    return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
4596  } else {
4597    llvm_unreachable("Unknown mul to lower!");
4598  }
4599}
4600
4601/// LowerOperation - Provide custom lowering hooks for some operations.
4602///
4603SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
4604  switch (Op.getOpcode()) {
4605  default: llvm_unreachable("Wasn't expecting to be able to lower this!");
4606  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
4607  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
4608  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
4609  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
4610  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
4611  case ISD::SETCC:              return LowerSETCC(Op, DAG);
4612  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
4613  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
4614  case ISD::VASTART:
4615    return LowerVASTART(Op, DAG, PPCSubTarget);
4616
4617  case ISD::VAARG:
4618    return LowerVAARG(Op, DAG, PPCSubTarget);
4619
4620  case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG, PPCSubTarget);
4621  case ISD::DYNAMIC_STACKALLOC:
4622    return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget);
4623
4624  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
4625  case ISD::FP_TO_UINT:
4626  case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG,
4627                                                       Op.getDebugLoc());
4628  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
4629  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
4630
4631  // Lower 64-bit shifts.
4632  case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
4633  case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
4634  case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
4635
4636  // Vector-related lowering.
4637  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
4638  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
4639  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
4640  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
4641  case ISD::MUL:                return LowerMUL(Op, DAG);
4642
4643  // Frame & Return address.
4644  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
4645  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
4646  }
4647}
4648
4649void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
4650                                           SmallVectorImpl<SDValue>&Results,
4651                                           SelectionDAG &DAG) const {
4652  const TargetMachine &TM = getTargetMachine();
4653  DebugLoc dl = N->getDebugLoc();
4654  switch (N->getOpcode()) {
4655  default:
4656    llvm_unreachable("Do not know how to custom type legalize this operation!");
4657  case ISD::VAARG: {
4658    if (!TM.getSubtarget<PPCSubtarget>().isSVR4ABI()
4659        || TM.getSubtarget<PPCSubtarget>().isPPC64())
4660      return;
4661
4662    EVT VT = N->getValueType(0);
4663
4664    if (VT == MVT::i64) {
4665      SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, PPCSubTarget);
4666
4667      Results.push_back(NewNode);
4668      Results.push_back(NewNode.getValue(1));
4669    }
4670    return;
4671  }
4672  case ISD::FP_ROUND_INREG: {
4673    assert(N->getValueType(0) == MVT::ppcf128);
4674    assert(N->getOperand(0).getValueType() == MVT::ppcf128);
4675    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
4676                             MVT::f64, N->getOperand(0),
4677                             DAG.getIntPtrConstant(0));
4678    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
4679                             MVT::f64, N->getOperand(0),
4680                             DAG.getIntPtrConstant(1));
4681
4682    // This sequence changes FPSCR to do round-to-zero, adds the two halves
4683    // of the long double, and puts FPSCR back the way it was.  We do not
4684    // actually model FPSCR.
4685    std::vector<EVT> NodeTys;
4686    SDValue Ops[4], Result, MFFSreg, InFlag, FPreg;
4687
4688    NodeTys.push_back(MVT::f64);   // Return register
4689    NodeTys.push_back(MVT::Glue);    // Returns a flag for later insns
4690    Result = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0);
4691    MFFSreg = Result.getValue(0);
4692    InFlag = Result.getValue(1);
4693
4694    NodeTys.clear();
4695    NodeTys.push_back(MVT::Glue);   // Returns a flag
4696    Ops[0] = DAG.getConstant(31, MVT::i32);
4697    Ops[1] = InFlag;
4698    Result = DAG.getNode(PPCISD::MTFSB1, dl, NodeTys, Ops, 2);
4699    InFlag = Result.getValue(0);
4700
4701    NodeTys.clear();
4702    NodeTys.push_back(MVT::Glue);   // Returns a flag
4703    Ops[0] = DAG.getConstant(30, MVT::i32);
4704    Ops[1] = InFlag;
4705    Result = DAG.getNode(PPCISD::MTFSB0, dl, NodeTys, Ops, 2);
4706    InFlag = Result.getValue(0);
4707
4708    NodeTys.clear();
4709    NodeTys.push_back(MVT::f64);    // result of add
4710    NodeTys.push_back(MVT::Glue);   // Returns a flag
4711    Ops[0] = Lo;
4712    Ops[1] = Hi;
4713    Ops[2] = InFlag;
4714    Result = DAG.getNode(PPCISD::FADDRTZ, dl, NodeTys, Ops, 3);
4715    FPreg = Result.getValue(0);
4716    InFlag = Result.getValue(1);
4717
4718    NodeTys.clear();
4719    NodeTys.push_back(MVT::f64);
4720    Ops[0] = DAG.getConstant(1, MVT::i32);
4721    Ops[1] = MFFSreg;
4722    Ops[2] = FPreg;
4723    Ops[3] = InFlag;
4724    Result = DAG.getNode(PPCISD::MTFSF, dl, NodeTys, Ops, 4);
4725    FPreg = Result.getValue(0);
4726
4727    // We know the low half is about to be thrown away, so just use something
4728    // convenient.
4729    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128,
4730                                FPreg, FPreg));
4731    return;
4732  }
4733  case ISD::FP_TO_SINT:
4734    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
4735    return;
4736  }
4737}
4738
4739
4740//===----------------------------------------------------------------------===//
4741//  Other Lowering Code
4742//===----------------------------------------------------------------------===//
4743
4744MachineBasicBlock *
4745PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
4746                                    bool is64bit, unsigned BinOpcode) const {
4747  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
4748  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
4749
4750  const BasicBlock *LLVM_BB = BB->getBasicBlock();
4751  MachineFunction *F = BB->getParent();
4752  MachineFunction::iterator It = BB;
4753  ++It;
4754
4755  unsigned dest = MI->getOperand(0).getReg();
4756  unsigned ptrA = MI->getOperand(1).getReg();
4757  unsigned ptrB = MI->getOperand(2).getReg();
4758  unsigned incr = MI->getOperand(3).getReg();
4759  DebugLoc dl = MI->getDebugLoc();
4760
4761  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
4762  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
4763  F->insert(It, loopMBB);
4764  F->insert(It, exitMBB);
4765  exitMBB->splice(exitMBB->begin(), BB,
4766                  llvm::next(MachineBasicBlock::iterator(MI)),
4767                  BB->end());
4768  exitMBB->transferSuccessorsAndUpdatePHIs(BB);
4769
4770  MachineRegisterInfo &RegInfo = F->getRegInfo();
4771  unsigned TmpReg = (!BinOpcode) ? incr :
4772    RegInfo.createVirtualRegister(
4773       is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
4774                 (const TargetRegisterClass *) &PPC::GPRCRegClass);
4775
4776  //  thisMBB:
4777  //   ...
4778  //   fallthrough --> loopMBB
4779  BB->addSuccessor(loopMBB);
4780
4781  //  loopMBB:
4782  //   l[wd]arx dest, ptr
4783  //   add r0, dest, incr
4784  //   st[wd]cx. r0, ptr
4785  //   bne- loopMBB
4786  //   fallthrough --> exitMBB
4787  BB = loopMBB;
4788  BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
4789    .addReg(ptrA).addReg(ptrB);
4790  if (BinOpcode)
4791    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
4792  BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
4793    .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
4794  BuildMI(BB, dl, TII->get(PPC::BCC))
4795    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
4796  BB->addSuccessor(loopMBB);
4797  BB->addSuccessor(exitMBB);
4798
4799  //  exitMBB:
4800  //   ...
4801  BB = exitMBB;
4802  return BB;
4803}
4804
4805MachineBasicBlock *
4806PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
4807                                            MachineBasicBlock *BB,
4808                                            bool is8bit,    // operation
4809                                            unsigned BinOpcode) const {
4810  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
4811  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
4812  // In 64-bit mode we have to use 64 bits for addresses, even though the
4813  // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
4814  // registers without caring whether they're 32 or 64, but here we're
4815  // doing actual arithmetic on the addresses.
4816  bool is64bit = PPCSubTarget.isPPC64();
4817  unsigned ZeroReg = is64bit ? PPC::X0 : PPC::R0;
4818
4819  const BasicBlock *LLVM_BB = BB->getBasicBlock();
4820  MachineFunction *F = BB->getParent();
4821  MachineFunction::iterator It = BB;
4822  ++It;
4823
4824  unsigned dest = MI->getOperand(0).getReg();
4825  unsigned ptrA = MI->getOperand(1).getReg();
4826  unsigned ptrB = MI->getOperand(2).getReg();
4827  unsigned incr = MI->getOperand(3).getReg();
4828  DebugLoc dl = MI->getDebugLoc();
4829
4830  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
4831  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
4832  F->insert(It, loopMBB);
4833  F->insert(It, exitMBB);
4834  exitMBB->splice(exitMBB->begin(), BB,
4835                  llvm::next(MachineBasicBlock::iterator(MI)),
4836                  BB->end());
4837  exitMBB->transferSuccessorsAndUpdatePHIs(BB);
4838
4839  MachineRegisterInfo &RegInfo = F->getRegInfo();
4840  const TargetRegisterClass *RC =
4841    is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
4842              (const TargetRegisterClass *) &PPC::GPRCRegClass;
4843  unsigned PtrReg = RegInfo.createVirtualRegister(RC);
4844  unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
4845  unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
4846  unsigned Incr2Reg = RegInfo.createVirtualRegister(RC);
4847  unsigned MaskReg = RegInfo.createVirtualRegister(RC);
4848  unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
4849  unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
4850  unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
4851  unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC);
4852  unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
4853  unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
4854  unsigned Ptr1Reg;
4855  unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC);
4856
4857  //  thisMBB:
4858  //   ...
4859  //   fallthrough --> loopMBB
4860  BB->addSuccessor(loopMBB);
4861
4862  // The 4-byte load must be aligned, while a char or short may be
4863  // anywhere in the word.  Hence all this nasty bookkeeping code.
4864  //   add ptr1, ptrA, ptrB [copy if ptrA==0]
4865  //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
4866  //   xori shift, shift1, 24 [16]
4867  //   rlwinm ptr, ptr1, 0, 0, 29
4868  //   slw incr2, incr, shift
4869  //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
4870  //   slw mask, mask2, shift
4871  //  loopMBB:
4872  //   lwarx tmpDest, ptr
4873  //   add tmp, tmpDest, incr2
4874  //   andc tmp2, tmpDest, mask
4875  //   and tmp3, tmp, mask
4876  //   or tmp4, tmp3, tmp2
4877  //   stwcx. tmp4, ptr
4878  //   bne- loopMBB
4879  //   fallthrough --> exitMBB
4880  //   srw dest, tmpDest, shift
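      // For example, for a byte at (ptr1 & 3) == 1: shift1 = 8 and
      // shift = 8 ^ 24 = 16, so mask = 255 << 16 = 0x00FF0000 covers
      // big-endian byte 1 of the word.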
4881  if (ptrA != ZeroReg) {
4882    Ptr1Reg = RegInfo.createVirtualRegister(RC);
4883    BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
4884      .addReg(ptrA).addReg(ptrB);
4885  } else {
4886    Ptr1Reg = ptrB;
4887  }
4888  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
4889      .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
4890  BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
4891      .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
4892  if (is64bit)
4893    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
4894      .addReg(Ptr1Reg).addImm(0).addImm(61);
4895  else
4896    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
4897      .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
4898  BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg)
4899      .addReg(incr).addReg(ShiftReg);
4900  if (is8bit)
4901    BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
4902  else {
4903    BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
4904    BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535);
4905  }
4906  BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
4907      .addReg(Mask2Reg).addReg(ShiftReg);
4908
4909  BB = loopMBB;
4910  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
4911    .addReg(ZeroReg).addReg(PtrReg);
4912  if (BinOpcode)
4913    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
4914      .addReg(Incr2Reg).addReg(TmpDestReg);
4915  BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
4916    .addReg(TmpDestReg).addReg(MaskReg);
4917  BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
4918    .addReg(TmpReg).addReg(MaskReg);
4919  BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
4920    .addReg(Tmp3Reg).addReg(Tmp2Reg);
4921  BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
4922    .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg);
4923  BuildMI(BB, dl, TII->get(PPC::BCC))
4924    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
4925  BB->addSuccessor(loopMBB);
4926  BB->addSuccessor(exitMBB);
4927
4928  //  exitMBB:
4929  //   ...
4930  BB = exitMBB;
4931  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg)
4932    .addReg(ShiftReg);
4933  return BB;
4934}
4935
4936MachineBasicBlock *
4937PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
4938                                               MachineBasicBlock *BB) const {
4939  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
4940
4941  // To "insert" these instructions we actually have to insert their
4942  // control-flow patterns.
4943  const BasicBlock *LLVM_BB = BB->getBasicBlock();
4944  MachineFunction::iterator It = BB;
4945  ++It;
4946
4947  MachineFunction *F = BB->getParent();
4948
4949  if (PPCSubTarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 ||
4950                                 MI->getOpcode() == PPC::SELECT_CC_I8)) {
4951    unsigned OpCode = MI->getOpcode() == PPC::SELECT_CC_I8 ?
4952                                         PPC::ISEL8 : PPC::ISEL;
4953    unsigned SelectPred = MI->getOperand(4).getImm();
4954    DebugLoc dl = MI->getDebugLoc();
4955
4956    // The SelectPred is ((BI << 5) | BO) for a BCC
4957    unsigned BO = SelectPred & 0xF;
4958    assert((BO == 12 || BO == 4) && "invalid predicate BO field for isel");
4959
4960    unsigned TrueOpNo, FalseOpNo;
4961    if (BO == 12) {
4962      TrueOpNo = 2;
4963      FalseOpNo = 3;
4964    } else {
4965      TrueOpNo = 3;
4966      FalseOpNo = 2;
4967      SelectPred = PPC::InvertPredicate((PPC::Predicate)SelectPred);
4968    }
4969
4970    BuildMI(*BB, MI, dl, TII->get(OpCode), MI->getOperand(0).getReg())
4971      .addReg(MI->getOperand(TrueOpNo).getReg())
4972      .addReg(MI->getOperand(FalseOpNo).getReg())
4973      .addImm(SelectPred).addReg(MI->getOperand(1).getReg());
4974  } else if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
4975             MI->getOpcode() == PPC::SELECT_CC_I8 ||
4976             MI->getOpcode() == PPC::SELECT_CC_F4 ||
4977             MI->getOpcode() == PPC::SELECT_CC_F8 ||
4978             MI->getOpcode() == PPC::SELECT_CC_VRRC) {
4979
4980
4981    // The incoming instruction knows the destination vreg to set, the
4982    // condition code register to branch on, the true/false values to
4983    // select between, and a branch opcode to use.
4984
4985    //  thisMBB:
4986    //  ...
4987    //   TrueVal = ...
4988    //   cmpTY ccX, r1, r2
4989    //   bCC copy1MBB
4990    //   fallthrough --> copy0MBB
4991    MachineBasicBlock *thisMBB = BB;
4992    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
4993    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
4994    unsigned SelectPred = MI->getOperand(4).getImm();
4995    DebugLoc dl = MI->getDebugLoc();
4996    F->insert(It, copy0MBB);
4997    F->insert(It, sinkMBB);
4998
4999    // Transfer the remainder of BB and its successor edges to sinkMBB.
5000    sinkMBB->splice(sinkMBB->begin(), BB,
5001                    llvm::next(MachineBasicBlock::iterator(MI)),
5002                    BB->end());
5003    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
5004
5005    // Next, add the true and fallthrough blocks as its successors.
5006    BB->addSuccessor(copy0MBB);
5007    BB->addSuccessor(sinkMBB);
5008
5009    BuildMI(BB, dl, TII->get(PPC::BCC))
5010      .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
5011
5012    //  copy0MBB:
5013    //   %FalseValue = ...
5014    //   # fallthrough to sinkMBB
5015    BB = copy0MBB;
5016
5017    // Update machine-CFG edges
5018    BB->addSuccessor(sinkMBB);
5019
5020    //  sinkMBB:
5021    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
5022    //  ...
5023    BB = sinkMBB;
5024    BuildMI(*BB, BB->begin(), dl,
5025            TII->get(PPC::PHI), MI->getOperand(0).getReg())
5026      .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
5027      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
5028  } else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
5030    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
5031  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
5032    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
5033  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
5034    BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4);
5035  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
5036    BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8);
5037
5038  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
5039    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
5040  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
5041    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
5042  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
5043    BB = EmitAtomicBinary(MI, BB, false, PPC::AND);
5044  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
5045    BB = EmitAtomicBinary(MI, BB, true, PPC::AND8);
5046
5047  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
5048    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
5049  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
5050    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
5051  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
5052    BB = EmitAtomicBinary(MI, BB, false, PPC::OR);
5053  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
5054    BB = EmitAtomicBinary(MI, BB, true, PPC::OR8);
5055
5056  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
5057    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
5058  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
5059    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
5060  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
5061    BB = EmitAtomicBinary(MI, BB, false, PPC::XOR);
5062  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
5063    BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8);
5064
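  // Note: LLVM's atomicrmw nand is ~(x & y), not x & ~y, so the NAND
  // pseudos below must expand with nand rather than andc.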
5065  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
5066    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
5067  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
5068    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
5069  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
5070    BB = EmitAtomicBinary(MI, BB, false, PPC::NAND);
5071  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
5072    BB = EmitAtomicBinary(MI, BB, true, PPC::NAND8);
5073
5074  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
5075    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
5076  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
5077    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
5078  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
5079    BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF);
5080  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
5081    BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8);
5082
5083  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8)
5084    BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
5085  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16)
5086    BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
5087  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32)
5088    BB = EmitAtomicBinary(MI, BB, false, 0);
5089  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64)
5090    BB = EmitAtomicBinary(MI, BB, true, 0);
5091
5092  else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
5093           MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) {
5094    bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
5095
5096    unsigned dest   = MI->getOperand(0).getReg();
5097    unsigned ptrA   = MI->getOperand(1).getReg();
5098    unsigned ptrB   = MI->getOperand(2).getReg();
5099    unsigned oldval = MI->getOperand(3).getReg();
5100    unsigned newval = MI->getOperand(4).getReg();
5101    DebugLoc dl     = MI->getDebugLoc();
5102
5103    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
5104    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
5105    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
5106    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
5107    F->insert(It, loop1MBB);
5108    F->insert(It, loop2MBB);
5109    F->insert(It, midMBB);
5110    F->insert(It, exitMBB);
5111    exitMBB->splice(exitMBB->begin(), BB,
5112                    llvm::next(MachineBasicBlock::iterator(MI)),
5113                    BB->end());
5114    exitMBB->transferSuccessorsAndUpdatePHIs(BB);
5115
5116    //  thisMBB:
5117    //   ...
5118    //   fallthrough --> loop1MBB
5119    BB->addSuccessor(loop1MBB);
5120
5121    // loop1MBB:
5122    //   l[wd]arx dest, ptr
5123    //   cmp[wd] oldval, dest
5124    //   bne- midMBB
5125    // loop2MBB:
5126    //   st[wd]cx. newval, ptr
5127    //   bne- loop1MBB
5128    //   b exitMBB
5129    // midMBB:
5130    //   st[wd]cx. dest, ptr
5131    // exitMBB:
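    // As a sketch, this is the expansion a word-sized cmpxchg gets, e.g.
    // IR like "%old = cmpxchg i32* %ptr, i32 %oldval, i32 %newval seq_cst".
    // The store in midMBB re-stores the value just loaded solely to cancel
    // the reservation taken by l[wd]arx before falling through to exitMBB.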
5132    BB = loop1MBB;
5133    BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
5134      .addReg(ptrA).addReg(ptrB);
5135    BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
5136      .addReg(oldval).addReg(dest);
5137    BuildMI(BB, dl, TII->get(PPC::BCC))
5138      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
5139    BB->addSuccessor(loop2MBB);
5140    BB->addSuccessor(midMBB);
5141
5142    BB = loop2MBB;
5143    BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
5144      .addReg(newval).addReg(ptrA).addReg(ptrB);
5145    BuildMI(BB, dl, TII->get(PPC::BCC))
5146      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
5147    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
5148    BB->addSuccessor(loop1MBB);
5149    BB->addSuccessor(exitMBB);
5150
5151    BB = midMBB;
5152    BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
5153      .addReg(dest).addReg(ptrA).addReg(ptrB);
5154    BB->addSuccessor(exitMBB);
5155
5156    //  exitMBB:
5157    //   ...
5158    BB = exitMBB;
5159  } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
5160             MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
5161    // We must use 64-bit registers for addresses when targeting 64-bit,
5162    // since we're actually doing arithmetic on them.  Other registers
5163    // can be 32-bit.
5164    bool is64bit = PPCSubTarget.isPPC64();
5165    bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
5166
5167    unsigned dest   = MI->getOperand(0).getReg();
5168    unsigned ptrA   = MI->getOperand(1).getReg();
5169    unsigned ptrB   = MI->getOperand(2).getReg();
5170    unsigned oldval = MI->getOperand(3).getReg();
5171    unsigned newval = MI->getOperand(4).getReg();
5172    DebugLoc dl     = MI->getDebugLoc();
5173
5174    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
5175    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
5176    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
5177    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
5178    F->insert(It, loop1MBB);
5179    F->insert(It, loop2MBB);
5180    F->insert(It, midMBB);
5181    F->insert(It, exitMBB);
5182    exitMBB->splice(exitMBB->begin(), BB,
5183                    llvm::next(MachineBasicBlock::iterator(MI)),
5184                    BB->end());
5185    exitMBB->transferSuccessorsAndUpdatePHIs(BB);
5186
5187    MachineRegisterInfo &RegInfo = F->getRegInfo();
5188    const TargetRegisterClass *RC =
5189      is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
5190                (const TargetRegisterClass *) &PPC::GPRCRegClass;
5191    unsigned PtrReg = RegInfo.createVirtualRegister(RC);
5192    unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
5193    unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
5194    unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
5195    unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
5196    unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
5197    unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
5198    unsigned MaskReg = RegInfo.createVirtualRegister(RC);
5199    unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
5200    unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
5201    unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
5202    unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
5203    unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
5204    unsigned Ptr1Reg;
5205    unsigned TmpReg = RegInfo.createVirtualRegister(RC);
5206    unsigned ZeroReg = is64bit ? PPC::X0 : PPC::R0;
5207    //  thisMBB:
5208    //   ...
5209    //   fallthrough --> loop1MBB
5210    BB->addSuccessor(loop1MBB);
5211
5212    // The 4-byte load must be aligned, while a char or short may be
5213    // anywhere in the word.  Hence all this nasty bookkeeping code.
5214    //   add ptr1, ptrA, ptrB [copy if ptrA==0]
5215    //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
5216    //   xori shift, shift1, 24 [16]
5217    //   rlwinm ptr, ptr1, 0, 0, 29
5218    //   slw newval2, newval, shift
5219    //   slw oldval2, oldval, shift
5220    //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
5221    //   slw mask, mask2, shift
5222    //   and newval3, newval2, mask
5223    //   and oldval3, oldval2, mask
5224    // loop1MBB:
5225    //   lwarx tmpDest, ptr
5226    //   and tmp, tmpDest, mask
5227    //   cmpw tmp, oldval3
5228    //   bne- midMBB
5229    // loop2MBB:
5230    //   andc tmp2, tmpDest, mask
5231    //   or tmp4, tmp2, newval3
5232    //   stwcx. tmp4, ptr
5233    //   bne- loop1MBB
5234    //   b exitMBB
5235    // midMBB:
5236    //   stwcx. tmpDest, ptr
5237    // exitMBB:
5238    //   srw dest, tmpDest, shift
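    // Worked example of the shift bookkeeping (big-endian, 8-bit case):
    // for a byte at offset 2 within its word, shift1 = (ptr1 & 3) << 3 = 16
    // and shift = 16 ^ 24 = 8, since byte 2 of a big-endian word occupies
    // bits <15:8> of the loaded value.  The xori with 24 (16 for halfwords)
    // turns a byte offset into the left-shift amount that positions the
    // operand within the word.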
5239    if (ptrA != ZeroReg) {
5240      Ptr1Reg = RegInfo.createVirtualRegister(RC);
5241      BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
5242        .addReg(ptrA).addReg(ptrB);
5243    } else {
5244      Ptr1Reg = ptrB;
5245    }
5246    BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
5247        .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
5248    BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
5249        .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
5250    if (is64bit)
5251      BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
5252        .addReg(Ptr1Reg).addImm(0).addImm(61);
5253    else
5254      BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
5255        .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
5256    BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
5257        .addReg(newval).addReg(ShiftReg);
5258    BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
5259        .addReg(oldval).addReg(ShiftReg);
5260    if (is8bit)
5261      BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
5262    else {
5263      BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
5264      BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
5265        .addReg(Mask3Reg).addImm(65535);
5266    }
5267    BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
5268        .addReg(Mask2Reg).addReg(ShiftReg);
5269    BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
5270        .addReg(NewVal2Reg).addReg(MaskReg);
5271    BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
5272        .addReg(OldVal2Reg).addReg(MaskReg);
5273
5274    BB = loop1MBB;
5275    BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
5276        .addReg(ZeroReg).addReg(PtrReg);
5277    BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
5278        .addReg(TmpDestReg).addReg(MaskReg);
5279    BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
5280        .addReg(TmpReg).addReg(OldVal3Reg);
5281    BuildMI(BB, dl, TII->get(PPC::BCC))
5282        .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
5283    BB->addSuccessor(loop2MBB);
5284    BB->addSuccessor(midMBB);
5285
5286    BB = loop2MBB;
5287    BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
5288        .addReg(TmpDestReg).addReg(MaskReg);
5289    BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
5290        .addReg(Tmp2Reg).addReg(NewVal3Reg);
5291    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
5292        .addReg(ZeroReg).addReg(PtrReg);
5293    BuildMI(BB, dl, TII->get(PPC::BCC))
5294      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
5295    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
5296    BB->addSuccessor(loop1MBB);
5297    BB->addSuccessor(exitMBB);
5298
5299    BB = midMBB;
5300    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
5301      .addReg(ZeroReg).addReg(PtrReg);
5302    BB->addSuccessor(exitMBB);
5303
5304    //  exitMBB:
5305    //   ...
5306    BB = exitMBB;
5307    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpReg)
5308      .addReg(ShiftReg);
5309  } else {
5310    llvm_unreachable("Unexpected instr type to insert");
5311  }
5312
5313  MI->eraseFromParent();   // The pseudo instruction is gone now.
5314  return BB;
5315}
5316
5317//===----------------------------------------------------------------------===//
5318// Target Optimization Hooks
5319//===----------------------------------------------------------------------===//
5320
5321SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
5322                                             DAGCombinerInfo &DCI) const {
5323  const TargetMachine &TM = getTargetMachine();
5324  SelectionDAG &DAG = DCI.DAG;
5325  DebugLoc dl = N->getDebugLoc();
5326  switch (N->getOpcode()) {
5327  default: break;
5328  case PPCISD::SHL:
5329    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
5330      if (C->isNullValue())   // 0 << V -> 0.
5331        return N->getOperand(0);
5332    }
5333    break;
5334  case PPCISD::SRL:
5335    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
5336      if (C->isNullValue())   // 0 >>u V -> 0.
5337        return N->getOperand(0);
5338    }
5339    break;
5340  case PPCISD::SRA:
5341    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
5342      if (C->isNullValue() ||   //  0 >>s V -> 0.
5343          C->isAllOnesValue())    // -1 >>s V -> -1.
5344        return N->getOperand(0);
5345    }
5346    break;
5347
5348  case ISD::SINT_TO_FP:
5349    if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
5350      if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
5351        // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
5352        // We allow the src/dst to be either f32/f64, but the intermediate
5353        // type must be i64.
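        // As an illustrative example, C code like
        //   double d = (double)(long long)x;   /* x is a double */
        // becomes fctidz followed by fcfid here, with the i64 value
        // staying in a floating-point register instead of taking a round
        // trip through a stack slot.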
5354        if (N->getOperand(0).getValueType() == MVT::i64 &&
5355            N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) {
5356          SDValue Val = N->getOperand(0).getOperand(0);
5357          if (Val.getValueType() == MVT::f32) {
5358            Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
5359            DCI.AddToWorklist(Val.getNode());
5360          }
5361
5362          Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val);
5363          DCI.AddToWorklist(Val.getNode());
5364          Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val);
5365          DCI.AddToWorklist(Val.getNode());
5366          if (N->getValueType(0) == MVT::f32) {
5367            Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val,
5368                              DAG.getIntPtrConstant(0));
5369            DCI.AddToWorklist(Val.getNode());
5370          }
5371          return Val;
5372        } else if (N->getOperand(0).getValueType() == MVT::i32) {
5373          // If the intermediate type is i32, we can avoid the load/store here
5374          // too.
5375        }
5376      }
5377    }
5378    break;
5379  case ISD::STORE:
5380    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
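    // For example (a sketch), "*p = (int)d;" can be emitted as
    //   fctiwz f0, f1
    //   stfiwx f0, 0, r3
    // keeping the converted integer in an FPR instead of moving it to a
    // GPR through memory first.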
5381    if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
5382        !cast<StoreSDNode>(N)->isTruncatingStore() &&
5383        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
5384        N->getOperand(1).getValueType() == MVT::i32 &&
5385        N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
5386      SDValue Val = N->getOperand(1).getOperand(0);
5387      if (Val.getValueType() == MVT::f32) {
5388        Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
5389        DCI.AddToWorklist(Val.getNode());
5390      }
5391      Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
5392      DCI.AddToWorklist(Val.getNode());
5393
5394      Val = DAG.getNode(PPCISD::STFIWX, dl, MVT::Other, N->getOperand(0), Val,
5395                        N->getOperand(2), N->getOperand(3));
5396      DCI.AddToWorklist(Val.getNode());
5397      return Val;
5398    }
5399
5400    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
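    // For example (a sketch), "*p = __builtin_bswap32(x);" becomes a single
    // "stwbrx x, 0, p" (store word byte-reverse indexed) rather than a
    // multi-instruction byte swap followed by an ordinary store.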
5401    if (cast<StoreSDNode>(N)->isUnindexed() &&
5402        N->getOperand(1).getOpcode() == ISD::BSWAP &&
5403        N->getOperand(1).getNode()->hasOneUse() &&
5404        (N->getOperand(1).getValueType() == MVT::i32 ||
5405         N->getOperand(1).getValueType() == MVT::i16)) {
5406      SDValue BSwapOp = N->getOperand(1).getOperand(0);
5407      // Do an any-extend to 32-bits if this is a half-word input.
5408      if (BSwapOp.getValueType() == MVT::i16)
5409        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
5410
5411      SDValue Ops[] = {
5412        N->getOperand(0), BSwapOp, N->getOperand(2),
5413        DAG.getValueType(N->getOperand(1).getValueType())
5414      };
5415      return
5416        DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
5417                                Ops, array_lengthof(Ops),
5418                                cast<StoreSDNode>(N)->getMemoryVT(),
5419                                cast<StoreSDNode>(N)->getMemOperand());
5420    }
5421    break;
5422  case ISD::BSWAP:
5423    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
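    // For example (a sketch), "__builtin_bswap32(*p)" can be emitted as a
    // single "lwbrx r3, 0, p" (load word byte-reverse indexed); the i16
    // form uses lhbrx and then truncates the 32-bit result below.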
5424    if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
5425        N->getOperand(0).hasOneUse() &&
5426        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) {
5427      SDValue Load = N->getOperand(0);
5428      LoadSDNode *LD = cast<LoadSDNode>(Load);
5429      // Create the byte-swapping load.
5430      SDValue Ops[] = {
5431        LD->getChain(),    // Chain
5432        LD->getBasePtr(),  // Ptr
5433        DAG.getValueType(N->getValueType(0)) // VT
5434      };
5435      SDValue BSLoad =
5436        DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
5437                                DAG.getVTList(MVT::i32, MVT::Other), Ops, 3,
5438                                LD->getMemoryVT(), LD->getMemOperand());
5439
5440      // If this is an i16 load, insert the truncate.
5441      SDValue ResVal = BSLoad;
5442      if (N->getValueType(0) == MVT::i16)
5443        ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
5444
5445      // First, combine the bswap away.  This makes the value produced by the
5446      // load dead.
5447      DCI.CombineTo(N, ResVal);
5448
5449      // Next, combine the load away; we give it a bogus result value but a
5450      // real chain result.  The result value is dead because the bswap is dead.
5451      DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
5452
5453      // Return N so it doesn't get rechecked!
5454      return SDValue(N, 0);
5455    }
5456
5457    break;
5458  case PPCISD::VCMP: {
5459    // If a VCMPo node already exists with exactly the same operands as this
5460    // node, use its result instead of this node (VCMPo computes both a CR6 and
5461    // a normal output).
5462    //
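    // For example (a sketch), if a function uses both vec_cmpgt(a, b) and
    // vec_any_gt(a, b), the value form (VCMP) and the predicate form
    // (VCMPo, which also sets CR6) can share one record-form vcmpgtsw.
    // instead of emitting two vector compares.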
5463    if (!N->getOperand(0).hasOneUse() &&
5464        !N->getOperand(1).hasOneUse() &&
5465        !N->getOperand(2).hasOneUse()) {
5466
5467      // Scan all of the users of the LHS, looking for VCMPo's that match.
5468      SDNode *VCMPoNode = 0;
5469
5470      SDNode *LHSN = N->getOperand(0).getNode();
5471      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
5472           UI != E; ++UI)
5473        if (UI->getOpcode() == PPCISD::VCMPo &&
5474            UI->getOperand(1) == N->getOperand(1) &&
5475            UI->getOperand(2) == N->getOperand(2) &&
5476            UI->getOperand(0) == N->getOperand(0)) {
5477          VCMPoNode = *UI;
5478          break;
5479        }
5480
5481      // If there is no VCMPo node, or if its flag result is unused, don't
5482      // transform this.
5483      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
5484        break;
5485
5486      // Look at the (necessarily single) use of the flag value.  If it has a
5487      // chain, this transformation is more complex.  Note that multiple things
5488      // could use the value result, which we should ignore.
5489      SDNode *FlagUser = 0;
5490      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
5491           FlagUser == 0; ++UI) {
5492        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
5493        SDNode *User = *UI;
5494        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
5495          if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
5496            FlagUser = User;
5497            break;
5498          }
5499        }
5500      }
5501
5502      // If the user is a MFCR instruction, we know this is safe.  Otherwise we
5503      // give up for right now.
5504      if (FlagUser->getOpcode() == PPCISD::MFCR)
5505        return SDValue(VCMPoNode, 0);
5506    }
5507    break;
5508  }
5509  case ISD::BR_CC: {
5510    // If this is a branch on an altivec predicate comparison, lower this so
5511    // that we don't have to do a MFCR: instead, branch directly on CR6.  This
5512    // lowering is done pre-legalize, because the legalizer lowers the predicate
5513    // compare down to code that is difficult to reassemble.
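    // For example (a sketch), "if (vec_all_eq(a, b)) ..." arrives here as a
    // BR_CC comparing the i32 result of the vcmpequw_p intrinsic against a
    // constant; the code below rewrites it to branch directly on the CR6
    // bit set by the record-form vcmpequw., avoiding an mfcr.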
5514    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
5515    SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
5516    int CompareOpc;
5517    bool isDot;
5518
5519    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
5520        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
5521        getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
5522      assert(isDot && "Can't compare against a vector result!");
5523
5524      // If this is a comparison against something other than 0/1, then we know
5525      // that the condition is never/always true.
5526      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
5527      if (Val != 0 && Val != 1) {
5528        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
5529          return N->getOperand(0);
5530        // Otherwise the comparison is always true; make it unconditional.
5531        return DAG.getNode(ISD::BR, dl, MVT::Other,
5532                           N->getOperand(0), N->getOperand(4));
5533      }
5534
5535      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
5536
5537      // Create the PPCISD altivec 'dot' comparison node.
5538      std::vector<EVT> VTs;
5539      SDValue Ops[] = {
5540        LHS.getOperand(2),  // LHS of compare
5541        LHS.getOperand(3),  // RHS of compare
5542        DAG.getConstant(CompareOpc, MVT::i32)
5543      };
5544      VTs.push_back(LHS.getOperand(2).getValueType());
5545      VTs.push_back(MVT::Glue);
5546      SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
5547
5548      // Unpack the result based on how the target uses it.
5549      PPC::Predicate CompOpc;
5550      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
5551      default:  // Can't happen, don't crash on invalid number though.
5552      case 0:   // Branch on the value of the EQ bit of CR6.
5553        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
5554        break;
5555      case 1:   // Branch on the inverted value of the EQ bit of CR6.
5556        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
5557        break;
5558      case 2:   // Branch on the value of the LT bit of CR6.
5559        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
5560        break;
5561      case 3:   // Branch on the inverted value of the LT bit of CR6.
5562        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
5563        break;
5564      }
5565
5566      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
5567                         DAG.getConstant(CompOpc, MVT::i32),
5568                         DAG.getRegister(PPC::CR6, MVT::i32),
5569                         N->getOperand(4), CompNode.getValue(1));
5570    }
5571    break;
5572  }
5573  }
5574
5575  return SDValue();
5576}
5577
5578//===----------------------------------------------------------------------===//
5579// Inline Assembly Support
5580//===----------------------------------------------------------------------===//
5581
5582void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
5583                                                       APInt &KnownZero,
5584                                                       APInt &KnownOne,
5585                                                       const SelectionDAG &DAG,
5586                                                       unsigned Depth) const {
5587  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
5588  switch (Op.getOpcode()) {
5589  default: break;
5590  case PPCISD::LBRX: {
5591    // lhbrx is known to have the top bits cleared out.
5592    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
5593      KnownZero = 0xFFFF0000;
5594    break;
5595  }
5596  case ISD::INTRINSIC_WO_CHAIN: {
5597    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
5598    default: break;
5599    case Intrinsic::ppc_altivec_vcmpbfp_p:
5600    case Intrinsic::ppc_altivec_vcmpeqfp_p:
5601    case Intrinsic::ppc_altivec_vcmpequb_p:
5602    case Intrinsic::ppc_altivec_vcmpequh_p:
5603    case Intrinsic::ppc_altivec_vcmpequw_p:
5604    case Intrinsic::ppc_altivec_vcmpgefp_p:
5605    case Intrinsic::ppc_altivec_vcmpgtfp_p:
5606    case Intrinsic::ppc_altivec_vcmpgtsb_p:
5607    case Intrinsic::ppc_altivec_vcmpgtsh_p:
5608    case Intrinsic::ppc_altivec_vcmpgtsw_p:
5609    case Intrinsic::ppc_altivec_vcmpgtub_p:
5610    case Intrinsic::ppc_altivec_vcmpgtuh_p:
5611    case Intrinsic::ppc_altivec_vcmpgtuw_p:
5612      KnownZero = ~1U;  // All bits but the low one are known to be zero.
5613      break;
5614    }
5615  }
5616  }
5617}
5618
5619
5620/// getConstraintType - Given a constraint, return the type of
5621/// constraint it is for this target.
5622PPCTargetLowering::ConstraintType
5623PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
5624  if (Constraint.size() == 1) {
5625    switch (Constraint[0]) {
5626    default: break;
5627    case 'b':
5628    case 'r':
5629    case 'f':
5630    case 'v':
5631    case 'y':
5632      return C_RegisterClass;
5633    }
5634  }
5635  return TargetLowering::getConstraintType(Constraint);
5636}
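// For example (illustrative only), in GCC-style inline asm such as
//   asm ("add %0, %1, %2" : "=r"(res) : "b"(a), "r"(b));
// 'r' may be given any GPR, while 'b' is meant to exclude R0 (which reads
// as zero in the base-register slot of many instructions); 'f', 'v', and
// 'y' select the FP, Altivec, and condition-register classes.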
5637
5638/// Examine constraint type and operand type and determine a weight value.
5639/// This object must already have been set up with the operand type
5640/// and the current alternative constraint selected.
5641TargetLowering::ConstraintWeight
5642PPCTargetLowering::getSingleConstraintMatchWeight(
5643    AsmOperandInfo &info, const char *constraint) const {
5644  ConstraintWeight weight = CW_Invalid;
5645  Value *CallOperandVal = info.CallOperandVal;
5646  // If we don't have a value, we can't do a match,
5647  // but allow it at the lowest weight.
5648  if (CallOperandVal == NULL)
5649    return CW_Default;
5650  Type *type = CallOperandVal->getType();
5651  // Look at the constraint type.
5652  switch (*constraint) {
5653  default:
5654    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
5655    break;
5656  case 'b':
5657    if (type->isIntegerTy())
5658      weight = CW_Register;
5659    break;
5660  case 'f':
5661    if (type->isFloatTy())
5662      weight = CW_Register;
5663    break;
5664  case 'd':
5665    if (type->isDoubleTy())
5666      weight = CW_Register;
5667    break;
5668  case 'v':
5669    if (type->isVectorTy())
5670      weight = CW_Register;
5671    break;
5672  case 'y':
5673    weight = CW_Register;
5674    break;
5675  }
5676  return weight;
5677}
5678
5679std::pair<unsigned, const TargetRegisterClass*>
5680PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
5681                                                EVT VT) const {
5682  if (Constraint.size() == 1) {
5683    // GCC RS6000 Constraint Letters
5684    switch (Constraint[0]) {
5685    case 'b':   // R1-R31
5686    case 'r':   // R0-R31
5687      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
5688        return std::make_pair(0U, &PPC::G8RCRegClass);
5689      return std::make_pair(0U, &PPC::GPRCRegClass);
5690    case 'f':
5691      if (VT == MVT::f32)
5692        return std::make_pair(0U, &PPC::F4RCRegClass);
5693      if (VT == MVT::f64)
5694        return std::make_pair(0U, &PPC::F8RCRegClass);
5695      break;
5696    case 'v':
5697      return std::make_pair(0U, &PPC::VRRCRegClass);
5698    case 'y':   // crrc
5699      return std::make_pair(0U, &PPC::CRRCRegClass);
5700    }
5701  }
5702
5703  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
5704}
5705
5706
5707/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
5708/// vector.  If it is invalid, don't add anything to Ops.
5709void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
5710                                                     std::string &Constraint,
5711                                                     std::vector<SDValue>&Ops,
5712                                                     SelectionDAG &DAG) const {
5713  SDValue Result(0,0);
5714
5715  // Only support length 1 constraints.
5716  if (Constraint.length() > 1) return;
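  // For example (illustrative only), an operand constrained with "I", as in
  //   asm ("addi %0, %1, %2" : "=r"(r) : "b"(a), "I"(16));
  // must fold to a signed 16-bit constant; the switch below validates the
  // immediate and emits it as a target constant, or else rejects the match.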
5717
5718  char Letter = Constraint[0];
5719  switch (Letter) {
5720  default: break;
5721  case 'I':
5722  case 'J':
5723  case 'K':
5724  case 'L':
5725  case 'M':
5726  case 'N':
5727  case 'O':
5728  case 'P': {
5729    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
5730    if (!CST) return; // Must be an immediate to match.
5731    unsigned Value = CST->getZExtValue();
5732    switch (Letter) {
5733    default: llvm_unreachable("Unknown constraint letter!");
5734    case 'I':  // "I" is a signed 16-bit constant.
5735      if ((short)Value == (int)Value)
5736        Result = DAG.getTargetConstant(Value, Op.getValueType());
5737      break;
5738    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
5739    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
5740      if ((short)Value == 0)
5741        Result = DAG.getTargetConstant(Value, Op.getValueType());
5742      break;
5743    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
5744      if ((Value >> 16) == 0)
5745        Result = DAG.getTargetConstant(Value, Op.getValueType());
5746      break;
5747    case 'M':  // "M" is a constant that is greater than 31.
5748      if (Value > 31)
5749        Result = DAG.getTargetConstant(Value, Op.getValueType());
5750      break;
5751    case 'N':  // "N" is a positive constant that is an exact power of two.
5752      if ((int)Value > 0 && isPowerOf2_32(Value))
5753        Result = DAG.getTargetConstant(Value, Op.getValueType());
5754      break;
5755    case 'O':  // "O" is the constant zero.
5756      if (Value == 0)
5757        Result = DAG.getTargetConstant(Value, Op.getValueType());
5758      break;
5759    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
5760      if ((short)-Value == (int)-Value)
5761        Result = DAG.getTargetConstant(Value, Op.getValueType());
5762      break;
5763    }
5764    break;
5765  }
5766  }
5767
5768  if (Result.getNode()) {
5769    Ops.push_back(Result);
5770    return;
5771  }
5772
5773  // Handle standard constraint letters.
5774  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
5775}
5776
5777// isLegalAddressingMode - Return true if the addressing mode represented
5778// by AM is legal for this target, for a load/store of the specified type.
5779bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
5780                                              Type *Ty) const {
5781  // FIXME: PPC does not allow r+i addressing modes for vectors!
5782
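  // As a quick reference (a sketch), the forms accepted below correspond to
  //   lwz  r4, 16(r3)     ; base + signed 16-bit displacement (r+i)
  //   lwzx r4, r3, r5     ; base + index                      (r+r)
  // while scaled-index and base+index+displacement forms are rejected,
  // since no PPC load/store instruction encodes them.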
5783  // PPC allows a sign-extended 16-bit immediate field.
5784  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
5785    return false;
5786
5787  // No global is ever allowed as a base.
5788  if (AM.BaseGV)
5789    return false;
5790
5791  // PPC only supports r+r and r+i addressing.
5792  switch (AM.Scale) {
5793  case 0:  // "r+i" or just "i", depending on HasBaseReg.
5794    break;
5795  case 1:
5796    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
5797      return false;
5798    // Otherwise we have r+r or r+i.
5799    break;
5800  case 2:
5801    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
5802      return false;
5803    // Allow 2*r as r+r.
5804    break;
5805  default:
5806    // No other scales are supported.
5807    return false;
5808  }
5809
5810  return true;
5811}
5812
5813/// isLegalAddressImmediate - Return true if the integer value can be used
5814/// as the offset of the target addressing mode for load / store of the
5815/// given type.
5816bool PPCTargetLowering::isLegalAddressImmediate(int64_t V,Type *Ty) const{
5817  // PPC allows a sign-extended 16-bit immediate field.
5818  return (V > -(1 << 16) && V < (1 << 16)-1);
5819}
5820
5821bool PPCTargetLowering::isLegalAddressImmediate(GlobalValue* GV) const {
5822  return false;
5823}
5824
5825SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
5826                                           SelectionDAG &DAG) const {
5827  MachineFunction &MF = DAG.getMachineFunction();
5828  MachineFrameInfo *MFI = MF.getFrameInfo();
5829  MFI->setReturnAddressIsTaken(true);
5830
5831  DebugLoc dl = Op.getDebugLoc();
5832  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5833
5834  // Make sure the function does not optimize away the store of the RA to
5835  // the stack.
5836  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
5837  FuncInfo->setLRStoreRequired();
5838  bool isPPC64 = PPCSubTarget.isPPC64();
5839  bool isDarwinABI = PPCSubTarget.isDarwinABI();
5840
5841  if (Depth > 0) {
5842    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
5843    SDValue Offset =
5845      DAG.getConstant(PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI),
5846                      isPPC64 ? MVT::i64 : MVT::i32);
5847    return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
5848                       DAG.getNode(ISD::ADD, dl, getPointerTy(),
5849                                   FrameAddr, Offset),
5850                       MachinePointerInfo(), false, false, false, 0);
5851  }
5852
5853  // Just load the return address off the stack.
5854  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
5855  return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
5856                     RetAddrFI, MachinePointerInfo(), false, false, false, 0);
5857}
5858
5859SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
5860                                          SelectionDAG &DAG) const {
5861  DebugLoc dl = Op.getDebugLoc();
5862  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5863
5864  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
5865  bool isPPC64 = PtrVT == MVT::i64;
5866
5867  MachineFunction &MF = DAG.getMachineFunction();
5868  MachineFrameInfo *MFI = MF.getFrameInfo();
5869  MFI->setFrameAddressIsTaken(true);
5870  bool is31 = (getTargetMachine().Options.DisableFramePointerElim(MF) ||
5871               MFI->hasVarSizedObjects()) &&
5872                  MFI->getStackSize() &&
5873                  !MF.getFunction()->hasFnAttr(Attribute::Naked);
5874  unsigned FrameReg = isPPC64 ? (is31 ? PPC::X31 : PPC::X1) :
5875                                (is31 ? PPC::R31 : PPC::R1);
5876  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
5877                                         PtrVT);
5878  while (Depth--)
5879    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
5880                            FrameAddr, MachinePointerInfo(), false, false,
5881                            false, 0);
5882  return FrameAddr;
5883}
5884
5885bool
5886PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
5887  // The PowerPC target isn't yet aware of offsets.
5888  return false;
5889}
5890
5891/// getOptimalMemOpType - Returns the target-specific optimal type for load
5892/// and store operations as a result of memset, memcpy, and memmove
5893/// lowering. If DstAlign is zero, any destination alignment can satisfy the
5894/// constraint. Similarly, if SrcAlign is zero, there is no need to check it
5895/// against the alignment requirement, probably because the source does not
5896/// need to be loaded. If 'IsZeroVal' is true, it is safe to return a
5897/// non-scalar-integer type, e.g. for an empty-string source, a constant, or
5898/// a value loaded from memory. 'MemcpyStrSrc' indicates whether the memcpy
5899/// source is constant, so it does not need to be loaded.
5900/// It returns EVT::Other if the type should be determined using generic
5901/// target-independent logic.
5903EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
5904                                           unsigned DstAlign, unsigned SrcAlign,
5905                                           bool IsZeroVal,
5906                                           bool MemcpyStrSrc,
5907                                           MachineFunction &MF) const {
5908  return PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
5913}
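// In practice this means, e.g., that an 8-byte inline memcpy is expanded
// as one i64 load/store pair on 64-bit subtargets but as two i32 pairs on
// 32-bit ones.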
5914
5915/// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
5916/// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
5917/// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
5918/// is expanded to mul + add.
5919bool PPCTargetLowering::isFMAFasterThanMulAndAdd(EVT VT) const {
5920  if (!VT.isSimple())
5921    return false;
5922
5923  switch (VT.getSimpleVT().SimpleTy) {
5924  case MVT::f32:
5925  case MVT::f64:
5926  case MVT::v4f32:
5927    return true;
5928  default:
5929    break;
5930  }
5931
5932  return false;
5933}
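// On PPC the fused forms are fmadd/fmadds (and vmaddfp for v4f32), so an
// expression like a * b + c can be emitted as a single fused multiply-add
// with one rounding step; returning true lets @llvm.fmuladd map onto them.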
5934
5935Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
5936  if (DisableILPPref)
5937    return TargetLowering::getSchedulingPreference(N);
5938
5939  return Sched::ILP;
5940}
5941
5942