SparcISelLowering.cpp revision bf34f346420dbcdb3f9376967bde701682471a79
1//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the interfaces that Sparc uses to lower LLVM code into a
11// selection DAG.
12//
13//===----------------------------------------------------------------------===//
14
15#include "SparcISelLowering.h"
16#include "SparcMachineFunctionInfo.h"
17#include "SparcTargetMachine.h"
18#include "MCTargetDesc/SparcBaseInfo.h"
19#include "llvm/CodeGen/CallingConvLower.h"
20#include "llvm/CodeGen/MachineFrameInfo.h"
21#include "llvm/CodeGen/MachineFunction.h"
22#include "llvm/CodeGen/MachineInstrBuilder.h"
23#include "llvm/CodeGen/MachineRegisterInfo.h"
24#include "llvm/CodeGen/SelectionDAG.h"
25#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
26#include "llvm/IR/DerivedTypes.h"
27#include "llvm/IR/Function.h"
28#include "llvm/IR/Module.h"
29#include "llvm/Support/ErrorHandling.h"
30using namespace llvm;
31
32
33//===----------------------------------------------------------------------===//
34// Calling Convention Implementation
35//===----------------------------------------------------------------------===//
36
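// Assign the hidden struct-return pointer argument. It is recorded as a
// custom memory location with offset 0; LowerCall_32 and
// LowerFormalArguments_32 handle it specially via the fixed slot at
// [%sp+64] / [%fp+64].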
37static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
38                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
39                                 ISD::ArgFlagsTy &ArgFlags, CCState &State)
40{
41  assert (ArgFlags.isSRet());
42
43  // Assign SRet argument.
44  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
45                                         0,
46                                         LocVT, LocInfo));
47  return true;
48}
49
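// Assign an f64 argument under the 32-bit ABI. The two 32-bit halves each go
// in the next available integer argument register, or on the stack once the
// registers are exhausted.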
50static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
51                                MVT &LocVT, CCValAssign::LocInfo &LocInfo,
52                                ISD::ArgFlagsTy &ArgFlags, CCState &State)
53{
54  static const uint16_t RegList[] = {
55    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
56  };
57  // Try to get first reg.
58  if (unsigned Reg = State.AllocateReg(RegList, 6)) {
59    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
60  } else {
61    // Assign the whole thing to the stack.
62    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
63                                           State.AllocateStack(8,4),
64                                           LocVT, LocInfo));
65    return true;
66  }
67
68  // Try to get second reg.
69  if (unsigned Reg = State.AllocateReg(RegList, 6))
70    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
71  else
72    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
73                                           State.AllocateStack(4,4),
74                                           LocVT, LocInfo));
75  return true;
76}
77
78// Allocate a full-sized argument for the 64-bit ABI.
79static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
80                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
81                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
82  assert((LocVT == MVT::f32 || LocVT.getSizeInBits() == 64) &&
83         "Can't handle non-64 bits locations");
84
85  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
86  unsigned Offset = State.AllocateStack(8, 8);
87  unsigned Reg = 0;
88
89  if (LocVT == MVT::i64 && Offset < 6*8)
90    // Promote integers to %i0-%i5.
91    Reg = SP::I0 + Offset/8;
92  else if (LocVT == MVT::f64 && Offset < 16*8)
93    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
94    Reg = SP::D0 + Offset/8;
95  else if (LocVT == MVT::f32 && Offset < 16*8)
96    // Promote floats to %f1, %f3, ...
97    Reg = SP::F1 + Offset/4;
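  // For example, the argument in the third 8-byte slot (Offset == 16) would be
  // assigned to %i2 for i64, to D2 (%d4) for f64, or to %f5 for f32.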
98
99  // Promote to register when possible, otherwise use the stack slot.
100  if (Reg) {
101    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
102    return true;
103  }
104
105  // This argument goes on the stack in an 8-byte slot.
106  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
107  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
108  if (LocVT == MVT::f32)
109    Offset += 4;
110
111  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
112  return true;
113}
114
115// Allocate a half-sized argument for the 64-bit ABI.
116//
117// This is used when passing { float, int } structs by value in registers.
118static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
119                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
120                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
121  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
122  unsigned Offset = State.AllocateStack(4, 4);
123
124  if (LocVT == MVT::f32 && Offset < 16*8) {
125    // Promote floats to %f0-%f31.
126    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
127                                     LocVT, LocInfo));
128    return true;
129  }
130
131  if (LocVT == MVT::i32 && Offset < 6*8) {
132    // Promote integers to %i0-%i5, using half the register.
133    unsigned Reg = SP::I0 + Offset/8;
134    LocVT = MVT::i64;
135    LocInfo = CCValAssign::AExt;
136
137    // Set the Custom bit if this i32 goes in the high bits of a register.
138    if (Offset % 8 == 0)
139      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
140                                             LocVT, LocInfo));
141    else
142      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
143    return true;
144  }
145
146  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
147  return true;
148}
149
150#include "SparcGenCallingConv.inc"
151
152// The calling conventions in SparcCallingConv.td are described in terms of the
153// callee's register window. This function translates registers to the
154// corresponding caller window %o register.
155static unsigned toCallerWindow(unsigned Reg) {
156  assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7 && "Unexpected enum");
157  if (Reg >= SP::I0 && Reg <= SP::I7)
158    return Reg - SP::I0 + SP::O0;
159  return Reg;
160}
161
162SDValue
163SparcTargetLowering::LowerReturn(SDValue Chain,
164                                 CallingConv::ID CallConv, bool IsVarArg,
165                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
166                                 const SmallVectorImpl<SDValue> &OutVals,
167                                 SDLoc DL, SelectionDAG &DAG) const {
168  if (Subtarget->is64Bit())
169    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
170  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
171}
172
173SDValue
174SparcTargetLowering::LowerReturn_32(SDValue Chain,
175                                    CallingConv::ID CallConv, bool IsVarArg,
176                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
177                                    const SmallVectorImpl<SDValue> &OutVals,
178                                    SDLoc DL, SelectionDAG &DAG) const {
179  MachineFunction &MF = DAG.getMachineFunction();
180
181  // CCValAssign - represent the assignment of the return value to locations.
182  SmallVector<CCValAssign, 16> RVLocs;
183
184  // CCState - Info about the registers and stack slot.
185  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
186                 DAG.getTarget(), RVLocs, *DAG.getContext());
187
188  // Analyze return values.
189  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
190
191  SDValue Flag;
192  SmallVector<SDValue, 4> RetOps(1, Chain);
193  // Make room for the return address offset.
194  RetOps.push_back(SDValue());
195
196  // Copy the result values into the output registers.
197  for (unsigned i = 0; i != RVLocs.size(); ++i) {
198    CCValAssign &VA = RVLocs[i];
199    assert(VA.isRegLoc() && "Can only return in registers!");
200
201    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(),
202                             OutVals[i], Flag);
203
204    // Guarantee that all emitted copies are stuck together with flags.
205    Flag = Chain.getValue(1);
206    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
207  }
208
209  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
210  // If the function returns a struct, copy the SRetReturnReg to I0
211  if (MF.getFunction()->hasStructRetAttr()) {
212    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
213    unsigned Reg = SFI->getSRetReturnReg();
214    if (!Reg)
215      llvm_unreachable("sret virtual register not created in the entry block");
216    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
217    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
218    Flag = Chain.getValue(1);
219    RetOps.push_back(DAG.getRegister(SP::I0, getPointerTy()));
220    RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
221  }
222
223  RetOps[0] = Chain;  // Update chain.
224  RetOps[1] = DAG.getConstant(RetAddrOffset, MVT::i32);
225
226  // Add the flag if we have it.
227  if (Flag.getNode())
228    RetOps.push_back(Flag);
229
230  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other,
231                     &RetOps[0], RetOps.size());
232}
233
234// Lower return values for the 64-bit ABI.
235// Return values are passed exactly the same way as function arguments.
236SDValue
237SparcTargetLowering::LowerReturn_64(SDValue Chain,
238                                    CallingConv::ID CallConv, bool IsVarArg,
239                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
240                                    const SmallVectorImpl<SDValue> &OutVals,
241                                    SDLoc DL, SelectionDAG &DAG) const {
242  // CCValAssign - represent the assignment of the return value to locations.
243  SmallVector<CCValAssign, 16> RVLocs;
244
245  // CCState - Info about the registers and stack slot.
246  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
247                 DAG.getTarget(), RVLocs, *DAG.getContext());
248
249  // Analyze return values.
250  CCInfo.AnalyzeReturn(Outs, CC_Sparc64);
251
252  SDValue Flag;
253  SmallVector<SDValue, 4> RetOps(1, Chain);
254
255  // The second operand on the return instruction is the return address offset.
256  // The return address is always %i7+8 with the 64-bit ABI.
257  RetOps.push_back(DAG.getConstant(8, MVT::i32));
258
259  // Copy the result values into the output registers.
260  for (unsigned i = 0; i != RVLocs.size(); ++i) {
261    CCValAssign &VA = RVLocs[i];
262    assert(VA.isRegLoc() && "Can only return in registers!");
263    SDValue OutVal = OutVals[i];
264
265    // Integer return values must be sign or zero extended by the callee.
266    switch (VA.getLocInfo()) {
267    case CCValAssign::SExt:
268      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
269      break;
270    case CCValAssign::ZExt:
271      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
272      break;
273    case CCValAssign::AExt:
274      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
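      // Deliberate fall-through: the AExt case shares the trailing break in
      // the default case.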
275    default:
276      break;
277    }
278
279    // The custom bit on an i32 return value indicates that it should be passed
280    // in the high bits of the register.
281    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
282      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
283                           DAG.getConstant(32, MVT::i32));
284
285      // The next value may go in the low bits of the same register.
286      // Handle both at once.
287      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
288        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
289        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
290        // Skip the next value, it's already done.
291        ++i;
292      }
293    }
294
295    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);
296
297    // Guarantee that all emitted copies are stuck together with flags.
298    Flag = Chain.getValue(1);
299    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
300  }
301
302  RetOps[0] = Chain;  // Update chain.
303
304  // Add the flag if we have it.
305  if (Flag.getNode())
306    RetOps.push_back(Flag);
307
308  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other,
309                     &RetOps[0], RetOps.size());
310}
311
312SDValue SparcTargetLowering::
313LowerFormalArguments(SDValue Chain,
314                     CallingConv::ID CallConv,
315                     bool IsVarArg,
316                     const SmallVectorImpl<ISD::InputArg> &Ins,
317                     SDLoc DL,
318                     SelectionDAG &DAG,
319                     SmallVectorImpl<SDValue> &InVals) const {
320  if (Subtarget->is64Bit())
321    return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
322                                   DL, DAG, InVals);
323  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
324                                 DL, DAG, InVals);
325}
326
327/// LowerFormalArguments_32 - V8 uses a very simple ABI, where all values are
328/// passed in either one or two GPRs, including FP values.  TODO: we should
329/// pass FP values in FP registers for fastcc functions.
330SDValue SparcTargetLowering::
331LowerFormalArguments_32(SDValue Chain,
332                        CallingConv::ID CallConv,
333                        bool isVarArg,
334                        const SmallVectorImpl<ISD::InputArg> &Ins,
335                        SDLoc dl,
336                        SelectionDAG &DAG,
337                        SmallVectorImpl<SDValue> &InVals) const {
338  MachineFunction &MF = DAG.getMachineFunction();
339  MachineRegisterInfo &RegInfo = MF.getRegInfo();
340  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
341
342  // Assign locations to all of the incoming arguments.
343  SmallVector<CCValAssign, 16> ArgLocs;
344  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
345                 getTargetMachine(), ArgLocs, *DAG.getContext());
346  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
347
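  // Incoming stack arguments begin at %fp+92: the 64-byte register save area,
  // the struct-return pointer slot at [%fp+64], and the six words at
  // [%fp+68..%fp+88] that are reserved for arguments passed in %i0-%i5.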
348  const unsigned StackOffset = 92;
349
350  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
351    CCValAssign &VA = ArgLocs[i];
352
353    if (i == 0  && Ins[i].Flags.isSRet()) {
354      // Get SRet from [%fp+64].
355      int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, 64, true);
356      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
357      SDValue Arg = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
358                                MachinePointerInfo(),
359                                false, false, false, 0);
360      InVals.push_back(Arg);
361      continue;
362    }
363
364    if (VA.isRegLoc()) {
365      if (VA.needsCustom()) {
366        assert(VA.getLocVT() == MVT::f64);
367        unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
368        MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
369        SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
370
371        assert(i+1 < e);
372        CCValAssign &NextVA = ArgLocs[++i];
373
374        SDValue LoVal;
375        if (NextVA.isMemLoc()) {
376          int FrameIdx = MF.getFrameInfo()->
377            CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
378          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
379          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
380                              MachinePointerInfo(),
381                              false, false, false, 0);
382        } else {
383          unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
384                                        &SP::IntRegsRegClass);
385          LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
386        }
387        SDValue WholeValue =
388          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
389        WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue);
390        InVals.push_back(WholeValue);
391        continue;
392      }
393      unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
394      MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
395      SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
396      if (VA.getLocVT() == MVT::f32)
397        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
398      else if (VA.getLocVT() != MVT::i32) {
399        Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
400                          DAG.getValueType(VA.getLocVT()));
401        Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
402      }
403      InVals.push_back(Arg);
404      continue;
405    }
406
407    assert(VA.isMemLoc());
408
409    unsigned Offset = VA.getLocMemOffset()+StackOffset;
410
411    if (VA.needsCustom()) {
412      assert(VA.getValVT() == MVT::f64);
413      // If it is double-word aligned, just load.
414      if (Offset % 8 == 0) {
415        int FI = MF.getFrameInfo()->CreateFixedObject(8,
416                                                      Offset,
417                                                      true);
418        SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
419        SDValue Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
420                                   MachinePointerInfo(),
421                                   false, false, false, 0);
422        InVals.push_back(Load);
423        continue;
424      }
425
426      int FI = MF.getFrameInfo()->CreateFixedObject(4,
427                                                    Offset,
428                                                    true);
429      SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
430      SDValue HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
431                                  MachinePointerInfo(),
432                                  false, false, false, 0);
433      int FI2 = MF.getFrameInfo()->CreateFixedObject(4,
434                                                     Offset+4,
435                                                     true);
436      SDValue FIPtr2 = DAG.getFrameIndex(FI2, getPointerTy());
437
438      SDValue LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr2,
439                                  MachinePointerInfo(),
440                                  false, false, false, 0);
441
442      SDValue WholeValue =
443        DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
444      WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue);
445      InVals.push_back(WholeValue);
446      continue;
447    }
448
449    int FI = MF.getFrameInfo()->CreateFixedObject(4,
450                                                  Offset,
451                                                  true);
452    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
453    SDValue Load;
454    if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
455      Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
456                         MachinePointerInfo(),
457                         false, false, false, 0);
458    } else {
459      ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
460      // Sparc is big endian, so add an offset based on the ObjectVT.
461      unsigned Offset = 4-std::max(1U, VA.getValVT().getSizeInBits()/8);
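      // (For example, an i16 value would occupy the last two bytes of its
      // 4-byte slot, so Offset == 2 here.)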
462      FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
463                          DAG.getConstant(Offset, MVT::i32));
464      Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr,
465                            MachinePointerInfo(),
466                            VA.getValVT(), false, false,0);
467      Load = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Load);
468    }
469    InVals.push_back(Load);
470  }
471
472  if (MF.getFunction()->hasStructRetAttr()) {
473    // Copy the SRet Argument to SRetReturnReg.
474    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
475    unsigned Reg = SFI->getSRetReturnReg();
476    if (!Reg) {
477      Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
478      SFI->setSRetReturnReg(Reg);
479    }
480    SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
481    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
482  }
483
484  // Store remaining ArgRegs to the stack if this is a varargs function.
485  if (isVarArg) {
486    static const uint16_t ArgRegs[] = {
487      SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
488    };
489    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs, 6);
490    const uint16_t *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
491    unsigned ArgOffset = CCInfo.getNextStackOffset();
492    if (NumAllocated == 6)
493      ArgOffset += StackOffset;
494    else {
495      assert(!ArgOffset);
496      ArgOffset = 68+4*NumAllocated;
497    }
498
499    // Remember the vararg offset for the va_start implementation.
500    FuncInfo->setVarArgsFrameOffset(ArgOffset);
501
502    std::vector<SDValue> OutChains;
503
504    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
505      unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
506      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
507      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
508
509      int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
510                                                          true);
511      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
512
513      OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr,
514                                       MachinePointerInfo(),
515                                       false, false, 0));
516      ArgOffset += 4;
517    }
518
519    if (!OutChains.empty()) {
520      OutChains.push_back(Chain);
521      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
522                          &OutChains[0], OutChains.size());
523    }
524  }
525
526  return Chain;
527}
528
529// Lower formal arguments for the 64-bit ABI.
530SDValue SparcTargetLowering::
531LowerFormalArguments_64(SDValue Chain,
532                        CallingConv::ID CallConv,
533                        bool IsVarArg,
534                        const SmallVectorImpl<ISD::InputArg> &Ins,
535                        SDLoc DL,
536                        SelectionDAG &DAG,
537                        SmallVectorImpl<SDValue> &InVals) const {
538  MachineFunction &MF = DAG.getMachineFunction();
539
540  // Analyze arguments according to CC_Sparc64.
541  SmallVector<CCValAssign, 16> ArgLocs;
542  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
543                 getTargetMachine(), ArgLocs, *DAG.getContext());
544  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
545
546  // The argument array begins at %fp+BIAS+128, after the register save area.
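  // (That save area holds the 16 window registers %i0-%i7 and %l0-%l7, eight
  // bytes each.)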
547  const unsigned ArgArea = 128;
548
549  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
550    CCValAssign &VA = ArgLocs[i];
551    if (VA.isRegLoc()) {
552      // This argument is passed in a register.
553      // All integer register arguments are promoted by the caller to i64.
554
555      // Create a virtual register for the promoted live-in value.
556      unsigned VReg = MF.addLiveIn(VA.getLocReg(),
557                                   getRegClassFor(VA.getLocVT()));
558      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
559
560      // Get the high bits for i32 struct elements.
561      if (VA.getValVT() == MVT::i32 && VA.needsCustom())
562        Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
563                          DAG.getConstant(32, MVT::i32));
564
565      // The caller promoted the argument, so insert an Assert?ext SDNode so we
566      // won't promote the value again in this function.
567      switch (VA.getLocInfo()) {
568      case CCValAssign::SExt:
569        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
570                          DAG.getValueType(VA.getValVT()));
571        break;
572      case CCValAssign::ZExt:
573        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
574                          DAG.getValueType(VA.getValVT()));
575        break;
576      default:
577        break;
578      }
579
580      // Truncate the register down to the argument type.
581      if (VA.isExtInLoc())
582        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
583
584      InVals.push_back(Arg);
585      continue;
586    }
587
588    // The registers are exhausted. This argument was passed on the stack.
589    assert(VA.isMemLoc());
590    // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
591    // beginning of the arguments area at %fp+BIAS+128.
592    unsigned Offset = VA.getLocMemOffset() + ArgArea;
593    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
594    // Adjust the offset for extended arguments; SPARC is big-endian.
595    // The caller will have written the full slot with extended bytes, but we
596    // prefer our own extending loads.
597    if (VA.isExtInLoc())
598      Offset += 8 - ValSize;
599    int FI = MF.getFrameInfo()->CreateFixedObject(ValSize, Offset, true);
600    InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain,
601                                 DAG.getFrameIndex(FI, getPointerTy()),
602                                 MachinePointerInfo::getFixedStack(FI),
603                                 false, false, false, 0));
604  }
605
606  if (!IsVarArg)
607    return Chain;
608
609  // This function takes variable arguments, some of which may have been passed
610  // in registers %i0-%i5. Variable floating point arguments are never passed
611  // in floating point registers. They go on %i0-%i5 or on the stack like
612  // integer arguments.
613  //
614  // The va_start intrinsic needs to know the offset to the first variable
615  // argument.
616  unsigned ArgOffset = CCInfo.getNextStackOffset();
617  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
618  // Skip the 128 bytes of register save area.
619  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
620                                  Subtarget->getStackPointerBias());
621
622  // Save the variable arguments that were passed in registers.
623  // The caller is required to reserve stack space for 6 arguments regardless
624  // of how many arguments were actually passed.
625  SmallVector<SDValue, 8> OutChains;
626  for (; ArgOffset < 6*8; ArgOffset += 8) {
627    unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
628    SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
629    int FI = MF.getFrameInfo()->CreateFixedObject(8, ArgOffset + ArgArea, true);
630    OutChains.push_back(DAG.getStore(Chain, DL, VArg,
631                                     DAG.getFrameIndex(FI, getPointerTy()),
632                                     MachinePointerInfo::getFixedStack(FI),
633                                     false, false, 0));
634  }
635
636  if (!OutChains.empty())
637    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
638                        &OutChains[0], OutChains.size());
639
640  return Chain;
641}
642
643SDValue
644SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
645                               SmallVectorImpl<SDValue> &InVals) const {
646  if (Subtarget->is64Bit())
647    return LowerCall_64(CLI, InVals);
648  return LowerCall_32(CLI, InVals);
649}
650
651// Lower a call for the 32-bit ABI.
652SDValue
653SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
654                                  SmallVectorImpl<SDValue> &InVals) const {
655  SelectionDAG &DAG                     = CLI.DAG;
656  SDLoc &dl                             = CLI.DL;
657  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
658  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
659  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
660  SDValue Chain                         = CLI.Chain;
661  SDValue Callee                        = CLI.Callee;
662  bool &isTailCall                      = CLI.IsTailCall;
663  CallingConv::ID CallConv              = CLI.CallConv;
664  bool isVarArg                         = CLI.IsVarArg;
665
666  // Sparc target does not yet support tail call optimization.
667  isTailCall = false;
668
669  // Analyze operands of the call, assigning locations to each operand.
670  SmallVector<CCValAssign, 16> ArgLocs;
671  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
672                 DAG.getTarget(), ArgLocs, *DAG.getContext());
673  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
674
675  // Get the size of the outgoing arguments stack space requirement.
676  unsigned ArgsSize = CCInfo.getNextStackOffset();
677
678  // Keep stack frames 8-byte aligned.
679  ArgsSize = (ArgsSize+7) & ~7;
680
681  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
682
683  // Create local copies for byval args.
684  SmallVector<SDValue, 8> ByValArgs;
685  for (unsigned i = 0,  e = Outs.size(); i != e; ++i) {
686    ISD::ArgFlagsTy Flags = Outs[i].Flags;
687    if (!Flags.isByVal())
688      continue;
689
690    SDValue Arg = OutVals[i];
691    unsigned Size = Flags.getByValSize();
692    unsigned Align = Flags.getByValAlign();
693
694    int FI = MFI->CreateStackObject(Size, Align, false);
695    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
696    SDValue SizeNode = DAG.getConstant(Size, MVT::i32);
697
698    Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
699                          false,        // isVolatile,
700                          (Size <= 32), // AlwaysInline if size <= 32
701                          MachinePointerInfo(), MachinePointerInfo());
702    ByValArgs.push_back(FIPtr);
703  }
704
705  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true),
706                               dl);
707
708  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
709  SmallVector<SDValue, 8> MemOpChains;
710
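  // Outgoing stack arguments start at %sp+92 in the caller's frame, mirroring
  // the incoming-argument layout described in LowerFormalArguments_32.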
711  const unsigned StackOffset = 92;
712  bool hasStructRetAttr = false;
713  // Walk the register/memloc assignments, inserting copies/loads.
714  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
715       i != e;
716       ++i, ++realArgIdx) {
717    CCValAssign &VA = ArgLocs[i];
718    SDValue Arg = OutVals[realArgIdx];
719
720    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
721
722    // Use local copy if it is a byval arg.
723    if (Flags.isByVal())
724      Arg = ByValArgs[byvalArgIdx++];
725
726    // Promote the value if needed.
727    switch (VA.getLocInfo()) {
728    default: llvm_unreachable("Unknown loc info!");
729    case CCValAssign::Full: break;
730    case CCValAssign::SExt:
731      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
732      break;
733    case CCValAssign::ZExt:
734      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
735      break;
736    case CCValAssign::AExt:
737      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
738      break;
739    case CCValAssign::BCvt:
740      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
741      break;
742    }
743
744    if (Flags.isSRet()) {
745      assert(VA.needsCustom());
746      // store SRet argument in %sp+64
747      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
748      SDValue PtrOff = DAG.getIntPtrConstant(64);
749      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
750      MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
751                                         MachinePointerInfo(),
752                                         false, false, 0));
753      hasStructRetAttr = true;
754      continue;
755    }
756
757    if (VA.needsCustom()) {
758      assert(VA.getLocVT() == MVT::f64);
759
760      if (VA.isMemLoc()) {
761        unsigned Offset = VA.getLocMemOffset() + StackOffset;
762        // If it is double-word aligned, just store.
763        if (Offset % 8 == 0) {
764          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
765          SDValue PtrOff = DAG.getIntPtrConstant(Offset);
766          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
767          MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
768                                             MachinePointerInfo(),
769                                             false, false, 0));
770          continue;
771        }
772      }
773
774      SDValue StackPtr = DAG.CreateStackTemporary(MVT::f64, MVT::i32);
775      SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
776                                   Arg, StackPtr, MachinePointerInfo(),
777                                   false, false, 0);
778      // Sparc is big-endian, so the high part comes first.
779      SDValue Hi = DAG.getLoad(MVT::i32, dl, Store, StackPtr,
780                               MachinePointerInfo(), false, false, false, 0);
781      // Increment the pointer to the other half.
782      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
783                             DAG.getIntPtrConstant(4));
784      // Load the low part.
785      SDValue Lo = DAG.getLoad(MVT::i32, dl, Store, StackPtr,
786                               MachinePointerInfo(), false, false, false, 0);
787
788      if (VA.isRegLoc()) {
789        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Hi));
790        assert(i+1 != e);
791        CCValAssign &NextVA = ArgLocs[++i];
792        if (NextVA.isRegLoc()) {
793          RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Lo));
794        } else {
795          // Store the low part on the stack.
796          unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
797          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
798          SDValue PtrOff = DAG.getIntPtrConstant(Offset);
799          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
800          MemOpChains.push_back(DAG.getStore(Chain, dl, Lo, PtrOff,
801                                             MachinePointerInfo(),
802                                             false, false, 0));
803        }
804      } else {
805        unsigned Offset = VA.getLocMemOffset() + StackOffset;
806        // Store the high part.
807        SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
808        SDValue PtrOff = DAG.getIntPtrConstant(Offset);
809        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
810        MemOpChains.push_back(DAG.getStore(Chain, dl, Hi, PtrOff,
811                                           MachinePointerInfo(),
812                                           false, false, 0));
813        // Store the low part.
814        PtrOff = DAG.getIntPtrConstant(Offset+4);
815        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
816        MemOpChains.push_back(DAG.getStore(Chain, dl, Lo, PtrOff,
817                                           MachinePointerInfo(),
818                                           false, false, 0));
819      }
820      continue;
821    }
822
823    // Arguments that can be passed in registers must be kept in the
824    // RegsToPass vector.
825    if (VA.isRegLoc()) {
826      if (VA.getLocVT() != MVT::f32) {
827        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
828        continue;
829      }
830      Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
831      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
832      continue;
833    }
834
835    assert(VA.isMemLoc());
836
837    // Create a store off the stack pointer for this argument.
838    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
839    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset()+StackOffset);
840    PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
841    MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
842                                       MachinePointerInfo(),
843                                       false, false, 0));
844  }
845
846
847  // Emit all stores, make sure they occur before any copies into physregs.
848  if (!MemOpChains.empty())
849    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
850                        &MemOpChains[0], MemOpChains.size());
851
852  // Build a sequence of copy-to-reg nodes chained together with token
853  // chain and flag operands which copy the outgoing args into registers.
854  // The InFlag is necessary since all emitted instructions must be
855  // stuck together.
856  SDValue InFlag;
857  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
858    unsigned Reg = toCallerWindow(RegsToPass[i].first);
859    Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
860    InFlag = Chain.getValue(1);
861  }
862
863  unsigned SRetArgSize = (hasStructRetAttr) ? getSRetArgSize(DAG, Callee) : 0;
864
865  // If the callee is a GlobalAddress node (quite common, every direct call is)
866  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
867  // Likewise ExternalSymbol -> TargetExternalSymbol.
868  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
869    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
870  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
871    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
872
873  // Returns a chain & a flag for retval copy to use
874  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
875  SmallVector<SDValue, 8> Ops;
876  Ops.push_back(Chain);
877  Ops.push_back(Callee);
878  if (hasStructRetAttr)
879    Ops.push_back(DAG.getTargetConstant(SRetArgSize, MVT::i32));
880  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
881    Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
882                                  RegsToPass[i].second.getValueType()));
883
884  // Add a register mask operand representing the call-preserved registers.
885  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
886  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
887  assert(Mask && "Missing call preserved mask for calling convention");
888  Ops.push_back(DAG.getRegisterMask(Mask));
889
890  if (InFlag.getNode())
891    Ops.push_back(InFlag);
892
893  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
894  InFlag = Chain.getValue(1);
895
896  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
897                             DAG.getIntPtrConstant(0, true), InFlag, dl);
898  InFlag = Chain.getValue(1);
899
900  // Assign locations to each value returned by this call.
901  SmallVector<CCValAssign, 16> RVLocs;
902  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(),
903                 DAG.getTarget(), RVLocs, *DAG.getContext());
904
905  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
906
907  // Copy all of the result registers out of their specified physreg.
908  for (unsigned i = 0; i != RVLocs.size(); ++i) {
909    Chain = DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
910                               RVLocs[i].getValVT(), InFlag).getValue(1);
911    InFlag = Chain.getValue(2);
912    InVals.push_back(Chain.getValue(0));
913  }
914
915  return Chain;
916}
917
918// This function returns true if CalleeName is an ABI function that returns
919// a long double (fp128).
920static bool isFP128ABICall(const char *CalleeName)
921{
922  static const char *const ABICalls[] =
923    {  "_Q_add", "_Q_sub", "_Q_mul", "_Q_div",
924       "_Q_sqrt", "_Q_neg",
925       "_Q_itoq", "_Q_stoq", "_Q_dtoq", "_Q_utoq",
926       0
927    };
928  for (const char * const *I = ABICalls; *I != 0; ++I)
929    if (strcmp(CalleeName, *I) == 0)
930      return true;
931  return false;
932}
933
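// getSRetArgSize - Return the size in bytes of the struct that a call's sret
// argument points to. The 32-bit ABI emits this size as an extra operand on
// the call (the 'unimp <size>' word after the delay slot), which is why
// struct-returning functions use a return address offset of 12 in
// LowerReturn_32.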
934unsigned
935SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
936{
937  const Function *CalleeFn = 0;
938  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
939    CalleeFn = dyn_cast<Function>(G->getGlobal());
940  } else if (ExternalSymbolSDNode *E =
941             dyn_cast<ExternalSymbolSDNode>(Callee)) {
942    const Function *Fn = DAG.getMachineFunction().getFunction();
943    const Module *M = Fn->getParent();
944    const char *CalleeName = E->getSymbol();
945    CalleeFn = M->getFunction(CalleeName);
946    if (!CalleeFn && isFP128ABICall(CalleeName))
947      return 16; // Return sizeof(fp128)
948  }
949
950  if (!CalleeFn)
951    return 0;
952
953  assert(CalleeFn->hasStructRetAttr() &&
954         "Callee does not have the StructRet attribute.");
955
956  PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
957  Type *ElementTy = Ty->getElementType();
958  return getDataLayout()->getTypeAllocSize(ElementTy);
959}
960
961
962// Fixup floating point arguments in the ... part of a varargs call.
963//
964// The SPARC v9 ABI requires that floating point arguments are treated the same
965// as integers when calling a varargs function. This does not apply to the
966// fixed arguments that are part of the function's prototype.
967//
968// This function post-processes a CCValAssign array created by
969// AnalyzeCallOperands().
970static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
971                                   ArrayRef<ISD::OutputArg> Outs) {
972  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
973    const CCValAssign &VA = ArgLocs[i];
974    // FIXME: What about f32 arguments? C promotes them to f64 when calling
975    // varargs functions.
976    if (!VA.isRegLoc() || VA.getLocVT() != MVT::f64)
977      continue;
978    // The fixed arguments to a varargs function still go in FP registers.
979    if (Outs[VA.getValNo()].IsFixed)
980      continue;
981
982    // This floating point argument should be reassigned.
983    CCValAssign NewVA;
984
985    // Determine the offset into the argument array.
986    unsigned Offset = 8 * (VA.getLocReg() - SP::D0);
987    assert(Offset < 16*8 && "Offset out of range, bad register enum?");
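    // For example, an unprototyped f64 that CC_Sparc64 assigned to D2 (%d4)
    // has Offset == 16 and is moved to %i2 below.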
988
989    if (Offset < 6*8) {
990      // This argument should go in %i0-%i5.
991      unsigned IReg = SP::I0 + Offset/8;
992      // Full register, just bitconvert into i64.
993      NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
994                                  IReg, MVT::i64, CCValAssign::BCvt);
995    } else {
996      // This needs to go to memory, we're out of integer registers.
997      NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
998                                  Offset, VA.getLocVT(), VA.getLocInfo());
999    }
1000    ArgLocs[i] = NewVA;
1001  }
1002}
1003
1004// Lower a call for the 64-bit ABI.
1005SDValue
1006SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
1007                                  SmallVectorImpl<SDValue> &InVals) const {
1008  SelectionDAG &DAG = CLI.DAG;
1009  SDLoc DL = CLI.DL;
1010  SDValue Chain = CLI.Chain;
1011
1012  // Analyze operands of the call, assigning locations to each operand.
1013  SmallVector<CCValAssign, 16> ArgLocs;
1014  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
1015                 DAG.getTarget(), ArgLocs, *DAG.getContext());
1016  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1017
1018  // Get the size of the outgoing arguments stack space requirement.
1019  // The stack offset computed by CC_Sparc64 includes all arguments.
1020  // Called functions expect 6 argument words to exist in the stack frame, used
1021  // or not.
1022  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());
1023
1024  // Keep stack frames 16-byte aligned.
1025  ArgsSize = RoundUpToAlignment(ArgsSize, 16);
1026
1027  // Varargs calls require special treatment.
1028  if (CLI.IsVarArg)
1029    fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1030
1031  // Adjust the stack pointer to make room for the arguments.
1032  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1033  // with more than 6 arguments.
1034  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true),
1035                               DL);
1036
1037  // Collect the set of registers to pass to the function and their values.
1038  // This will be emitted as a sequence of CopyToReg nodes glued to the call
1039  // instruction.
1040  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
1041
1042  // Collect chains from all the memory operations that copy arguments to the
1043  // stack. They must follow the stack pointer adjustment above and precede the
1044  // call instruction itself.
1045  SmallVector<SDValue, 8> MemOpChains;
1046
1047  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1048    const CCValAssign &VA = ArgLocs[i];
1049    SDValue Arg = CLI.OutVals[i];
1050
1051    // Promote the value if needed.
1052    switch (VA.getLocInfo()) {
1053    default:
1054      llvm_unreachable("Unknown location info!");
1055    case CCValAssign::Full:
1056      break;
1057    case CCValAssign::SExt:
1058      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1059      break;
1060    case CCValAssign::ZExt:
1061      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1062      break;
1063    case CCValAssign::AExt:
1064      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1065      break;
1066    case CCValAssign::BCvt:
1067      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1068      break;
1069    }
1070
1071    if (VA.isRegLoc()) {
1072      // The custom bit on an i32 return value indicates that it should be
1073      // passed in the high bits of the register.
1074      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1075        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1076                          DAG.getConstant(32, MVT::i32));
1077
1078        // The next value may go in the low bits of the same register.
1079        // Handle both at once.
1080        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1081            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1082          SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
1083                                   CLI.OutVals[i+1]);
1084          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1085          // Skip the next value, it's already done.
1086          ++i;
1087        }
1088      }
1089      RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
1090      continue;
1091    }
1092
1093    assert(VA.isMemLoc());
1094
1095    // Create a store off the stack pointer for this argument.
1096    SDValue StackPtr = DAG.getRegister(SP::O6, getPointerTy());
1097    // The argument area starts at %fp+BIAS+128 in the callee frame,
1098    // %sp+BIAS+128 in ours.
1099    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1100                                           Subtarget->getStackPointerBias() +
1101                                           128);
1102    PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, PtrOff);
1103    MemOpChains.push_back(DAG.getStore(Chain, DL, Arg, PtrOff,
1104                                       MachinePointerInfo(),
1105                                       false, false, 0));
1106  }
1107
1108  // Emit all stores, make sure they occur before the call.
1109  if (!MemOpChains.empty())
1110    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
1111                        &MemOpChains[0], MemOpChains.size());
1112
1113  // Build a sequence of CopyToReg nodes glued together with token chain and
1114  // glue operands which copy the outgoing args into registers. The InGlue is
1115  // necessary since all emitted instructions must be stuck together in order
1116  // to pass the live physical registers.
1117  SDValue InGlue;
1118  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1119    Chain = DAG.getCopyToReg(Chain, DL,
1120                             RegsToPass[i].first, RegsToPass[i].second, InGlue);
1121    InGlue = Chain.getValue(1);
1122  }
1123
1124  // If the callee is a GlobalAddress node (quite common, every direct call is)
1125  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1126  // Likewise ExternalSymbol -> TargetExternalSymbol.
1127  SDValue Callee = CLI.Callee;
1128  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1129    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy());
1130  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1131    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy());
1132
1133  // Build the operands for the call instruction itself.
1134  SmallVector<SDValue, 8> Ops;
1135  Ops.push_back(Chain);
1136  Ops.push_back(Callee);
1137  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1138    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1139                                  RegsToPass[i].second.getValueType()));
1140
1141  // Add a register mask operand representing the call-preserved registers.
1142  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
1143  const uint32_t *Mask = TRI->getCallPreservedMask(CLI.CallConv);
1144  assert(Mask && "Missing call preserved mask for calling convention");
1145  Ops.push_back(DAG.getRegisterMask(Mask));
1146
1147  // Make sure the CopyToReg nodes are glued to the call instruction which
1148  // consumes the registers.
1149  if (InGlue.getNode())
1150    Ops.push_back(InGlue);
1151
1152  // Now the call itself.
1153  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1154  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
1155  InGlue = Chain.getValue(1);
1156
1157  // Revert the stack pointer immediately after the call.
1158  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
1159                             DAG.getIntPtrConstant(0, true), InGlue, DL);
1160  InGlue = Chain.getValue(1);
1161
1162  // Now extract the return values. This is more or less the same as
1163  // LowerFormalArguments_64.
1164
1165  // Assign locations to each value returned by this call.
1166  SmallVector<CCValAssign, 16> RVLocs;
1167  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
1168                 DAG.getTarget(), RVLocs, *DAG.getContext());
1169  RVInfo.AnalyzeCallResult(CLI.Ins, CC_Sparc64);
1170
1171  // Copy all of the result registers out of their specified physreg.
1172  for (unsigned i = 0; i != RVLocs.size(); ++i) {
1173    CCValAssign &VA = RVLocs[i];
1174    unsigned Reg = toCallerWindow(VA.getLocReg());
1175
1176    // When returning 'inreg { i32, i32 }', two consecutive i32 arguments can
1177    // reside in the same register in the high and low bits. Reuse the
1178    // previous CopyFromReg node to avoid duplicate copies.
1179    SDValue RV;
1180    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1181      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1182        RV = Chain.getValue(0);
1183
1184    // But usually we'll create a new CopyFromReg for a different register.
1185    if (!RV.getNode()) {
1186      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1187      Chain = RV.getValue(1);
1188      InGlue = Chain.getValue(2);
1189    }
1190
1191    // Get the high bits for i32 struct elements.
1192    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1193      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1194                       DAG.getConstant(32, MVT::i32));
1195
1196    // The callee promoted the return value, so insert an Assert?ext SDNode so
1197    // we won't promote the value again in this function.
1198    switch (VA.getLocInfo()) {
1199    case CCValAssign::SExt:
1200      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1201                       DAG.getValueType(VA.getValVT()));
1202      break;
1203    case CCValAssign::ZExt:
1204      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1205                       DAG.getValueType(VA.getValVT()));
1206      break;
1207    default:
1208      break;
1209    }
1210
1211    // Truncate the register down to the return value type.
1212    if (VA.isExtInLoc())
1213      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1214
1215    InVals.push_back(RV);
1216  }
1217
1218  return Chain;
1219}
1220
1221//===----------------------------------------------------------------------===//
1222// TargetLowering Implementation
1223//===----------------------------------------------------------------------===//
1224
1225/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1226/// condition.
1227static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
1228  switch (CC) {
1229  default: llvm_unreachable("Unknown integer condition code!");
1230  case ISD::SETEQ:  return SPCC::ICC_E;
1231  case ISD::SETNE:  return SPCC::ICC_NE;
1232  case ISD::SETLT:  return SPCC::ICC_L;
1233  case ISD::SETGT:  return SPCC::ICC_G;
1234  case ISD::SETLE:  return SPCC::ICC_LE;
1235  case ISD::SETGE:  return SPCC::ICC_GE;
1236  case ISD::SETULT: return SPCC::ICC_CS;
1237  case ISD::SETULE: return SPCC::ICC_LEU;
1238  case ISD::SETUGT: return SPCC::ICC_GU;
1239  case ISD::SETUGE: return SPCC::ICC_CC;
1240  }
1241}
1242
1243/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
1244/// FCC condition.
1245static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
1246  switch (CC) {
1247  default: llvm_unreachable("Unknown fp condition code!");
1248  case ISD::SETEQ:
1249  case ISD::SETOEQ: return SPCC::FCC_E;
1250  case ISD::SETNE:
1251  case ISD::SETUNE: return SPCC::FCC_NE;
1252  case ISD::SETLT:
1253  case ISD::SETOLT: return SPCC::FCC_L;
1254  case ISD::SETGT:
1255  case ISD::SETOGT: return SPCC::FCC_G;
1256  case ISD::SETLE:
1257  case ISD::SETOLE: return SPCC::FCC_LE;
1258  case ISD::SETGE:
1259  case ISD::SETOGE: return SPCC::FCC_GE;
1260  case ISD::SETULT: return SPCC::FCC_UL;
1261  case ISD::SETULE: return SPCC::FCC_ULE;
1262  case ISD::SETUGT: return SPCC::FCC_UG;
1263  case ISD::SETUGE: return SPCC::FCC_UGE;
1264  case ISD::SETUO:  return SPCC::FCC_U;
1265  case ISD::SETO:   return SPCC::FCC_O;
1266  case ISD::SETONE: return SPCC::FCC_LG;
1267  case ISD::SETUEQ: return SPCC::FCC_UE;
1268  }
1269}
1270
1271SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
1272  : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
1273  Subtarget = &TM.getSubtarget<SparcSubtarget>();
1274
1275  // Set up the register classes.
1276  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1277  addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1278  addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1279  addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1280  if (Subtarget->is64Bit())
1281    addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1282
1283  // Turn FP extload into load/fextend
1284  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
1285  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
1286
1287  // Sparc doesn't have i1 sign extending load
1288  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
1289
1290  // Turn FP truncstore into trunc + store.
1291  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1292  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1293  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1294
1295  // Custom legalize GlobalAddress nodes into LO/HI parts.
1296  setOperationAction(ISD::GlobalAddress, getPointerTy(), Custom);
1297  setOperationAction(ISD::GlobalTLSAddress, getPointerTy(), Custom);
1298  setOperationAction(ISD::ConstantPool, getPointerTy(), Custom);
1299  setOperationAction(ISD::BlockAddress, getPointerTy(), Custom);
1300
1301  // Sparc doesn't have sext_inreg, replace them with shl/sra
1302  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1303  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
1304  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
1305
1306  // Sparc has no REM or DIVREM operations.
1307  setOperationAction(ISD::UREM, MVT::i32, Expand);
1308  setOperationAction(ISD::SREM, MVT::i32, Expand);
1309  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1310  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1311
1312  // Custom expand fp<->sint
1313  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
1314  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
1315
1316  // Expand fp<->uint
1317  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
1318  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
1319
1320  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
1321  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
1322
1323  // Sparc has no select or setcc: expand to SELECT_CC.
1324  setOperationAction(ISD::SELECT, MVT::i32, Expand);
1325  setOperationAction(ISD::SELECT, MVT::f32, Expand);
1326  setOperationAction(ISD::SELECT, MVT::f64, Expand);
1327  setOperationAction(ISD::SELECT, MVT::f128, Expand);
1328
1329  setOperationAction(ISD::SETCC, MVT::i32, Expand);
1330  setOperationAction(ISD::SETCC, MVT::f32, Expand);
1331  setOperationAction(ISD::SETCC, MVT::f64, Expand);
1332  setOperationAction(ISD::SETCC, MVT::f128, Expand);
1333
1334  // Sparc doesn't have BRCOND either, it has BR_CC.
1335  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
1336  setOperationAction(ISD::BRIND, MVT::Other, Expand);
1337  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1338  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
1339  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
1340  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
1341  setOperationAction(ISD::BR_CC, MVT::f128, Custom);
1342
1343  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1344  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1345  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1346  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
1347
1348  if (Subtarget->is64Bit()) {
1349    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
1350    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
1351    setOperationAction(ISD::SELECT, MVT::i64, Expand);
1352    setOperationAction(ISD::SETCC, MVT::i64, Expand);
1353    setOperationAction(ISD::BR_CC, MVT::i64, Custom);
1354    setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
1355  }
1356
1357  // FIXME: There are instructions available for ATOMIC_FENCE
1358  // on SparcV8 and later.
1359  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
1360
1361  if (!Subtarget->isV9()) {
1362    // SparcV8 does not have FNEGD and FABSD.
1363    setOperationAction(ISD::FNEG, MVT::f64, Custom);
1364    setOperationAction(ISD::FABS, MVT::f64, Custom);
1365  }
1366
1367  setOperationAction(ISD::FSIN , MVT::f128, Expand);
1368  setOperationAction(ISD::FCOS , MVT::f128, Expand);
1369  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
1370  setOperationAction(ISD::FREM , MVT::f128, Expand);
1371  setOperationAction(ISD::FMA  , MVT::f128, Expand);
1372  setOperationAction(ISD::FSIN , MVT::f64, Expand);
1373  setOperationAction(ISD::FCOS , MVT::f64, Expand);
1374  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1375  setOperationAction(ISD::FREM , MVT::f64, Expand);
1376  setOperationAction(ISD::FMA  , MVT::f64, Expand);
1377  setOperationAction(ISD::FSIN , MVT::f32, Expand);
1378  setOperationAction(ISD::FCOS , MVT::f32, Expand);
1379  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1380  setOperationAction(ISD::FREM , MVT::f32, Expand);
1381  setOperationAction(ISD::FMA  , MVT::f32, Expand);
1382  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
1383  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
1384  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
1385  setOperationAction(ISD::CTLZ , MVT::i32, Expand);
1386  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
1387  setOperationAction(ISD::ROTL , MVT::i32, Expand);
1388  setOperationAction(ISD::ROTR , MVT::i32, Expand);
1389  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
1390  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1391  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
1392  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
1393  setOperationAction(ISD::FPOW , MVT::f128, Expand);
1394  setOperationAction(ISD::FPOW , MVT::f64, Expand);
1395  setOperationAction(ISD::FPOW , MVT::f32, Expand);
1396
1397  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
1398  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
1399  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
1400
1401  // FIXME: Sparc provides these multiplies, but we don't have them yet.
1402  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
1403  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
1404
1405  setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
1406
1407  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1408  setOperationAction(ISD::VASTART           , MVT::Other, Custom);
1409  // VAARG needs to be lowered to not do unaligned accesses for doubles.
1410  setOperationAction(ISD::VAARG             , MVT::Other, Custom);
1411
1412  // Use the default implementation.
1413  setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
1414  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
1415  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
1416  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Expand);
1417  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
1418
1419  // No debug info support yet.
1420  setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
1421
1422  setStackPointerRegisterToSaveRestore(SP::O6);
1423
1424  if (Subtarget->isV9())
1425    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
1426
1427  if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1428    setOperationAction(ISD::LOAD, MVT::f128, Legal);
1429    setOperationAction(ISD::STORE, MVT::f128, Legal);
1430  } else {
1431    setOperationAction(ISD::LOAD, MVT::f128, Custom);
1432    setOperationAction(ISD::STORE, MVT::f128, Custom);
1433  }
1434
1435  if (Subtarget->hasHardQuad()) {
1436    setOperationAction(ISD::FADD,  MVT::f128, Legal);
1437    setOperationAction(ISD::FSUB,  MVT::f128, Legal);
1438    setOperationAction(ISD::FMUL,  MVT::f128, Legal);
1439    setOperationAction(ISD::FDIV,  MVT::f128, Legal);
1440    setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1441    setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1442    setOperationAction(ISD::FP_ROUND,  MVT::f64, Legal);
1443    if (Subtarget->isV9()) {
1444      setOperationAction(ISD::FNEG, MVT::f128, Legal);
1445      setOperationAction(ISD::FABS, MVT::f128, Legal);
1446    } else {
1447      setOperationAction(ISD::FNEG, MVT::f128, Custom);
1448      setOperationAction(ISD::FABS, MVT::f128, Custom);
1449    }
1450  } else {
1451    // Custom legalize f128 operations.
1452
1453    setOperationAction(ISD::FADD,  MVT::f128, Custom);
1454    setOperationAction(ISD::FSUB,  MVT::f128, Custom);
1455    setOperationAction(ISD::FMUL,  MVT::f128, Custom);
1456    setOperationAction(ISD::FDIV,  MVT::f128, Custom);
1457    setOperationAction(ISD::FSQRT, MVT::f128, Custom);
1458    setOperationAction(ISD::FNEG,  MVT::f128, Custom);
1459    setOperationAction(ISD::FABS,  MVT::f128, Custom);
1460
1461    setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
1462    setOperationAction(ISD::FP_ROUND,  MVT::f64, Custom);
1463    setOperationAction(ISD::FP_ROUND,  MVT::f32, Custom);
1464
1465    // Set up runtime library names.
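    // The 64-bit (V9) ABI provides the _Qp_* soft-quad routines, while the
    // 32-bit ABI uses the _Q_* names.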
1466    if (Subtarget->is64Bit()) {
1467      setLibcallName(RTLIB::ADD_F128,  "_Qp_add");
1468      setLibcallName(RTLIB::SUB_F128,  "_Qp_sub");
1469      setLibcallName(RTLIB::MUL_F128,  "_Qp_mul");
1470      setLibcallName(RTLIB::DIV_F128,  "_Qp_div");
1471      setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
1472      setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
1473      setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
1474      setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
1475      setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
1476      setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
1477      setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
1478    } else {
1479      setLibcallName(RTLIB::ADD_F128,  "_Q_add");
1480      setLibcallName(RTLIB::SUB_F128,  "_Q_sub");
1481      setLibcallName(RTLIB::MUL_F128,  "_Q_mul");
1482      setLibcallName(RTLIB::DIV_F128,  "_Q_div");
1483      setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
1484      setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
1485      setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
1486      setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
1487      setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
1488      setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
1489      setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
1490    }
1491  }
1492
1493  setMinFunctionAlignment(2);
1494
1495  computeRegisterProperties();
1496}
1497
1498const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
1499  switch (Opcode) {
1500  default: return 0;
1501  case SPISD::CMPICC:     return "SPISD::CMPICC";
1502  case SPISD::CMPFCC:     return "SPISD::CMPFCC";
1503  case SPISD::BRICC:      return "SPISD::BRICC";
1504  case SPISD::BRXCC:      return "SPISD::BRXCC";
1505  case SPISD::BRFCC:      return "SPISD::BRFCC";
1506  case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
1507  case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
1508  case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
1509  case SPISD::Hi:         return "SPISD::Hi";
1510  case SPISD::Lo:         return "SPISD::Lo";
1511  case SPISD::FTOI:       return "SPISD::FTOI";
1512  case SPISD::ITOF:       return "SPISD::ITOF";
1513  case SPISD::CALL:       return "SPISD::CALL";
1514  case SPISD::RET_FLAG:   return "SPISD::RET_FLAG";
1515  case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
1516  case SPISD::FLUSHW:     return "SPISD::FLUSHW";
1517  }
1518}
1519
1520/// computeMaskedBitsForTargetNode - Determine which bits of Op are known to
1521/// be zero or one and return them in KnownZero/KnownOne. Op is expected to
1522/// be a target specific node. Used by DAG combiner.
1523void SparcTargetLowering::computeMaskedBitsForTargetNode
1524                                (const SDValue Op,
1525                                 APInt &KnownZero,
1526                                 APInt &KnownOne,
1527                                 const SelectionDAG &DAG,
1528                                 unsigned Depth) const {
1529  APInt KnownZero2, KnownOne2;
1530  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
1531
1532  switch (Op.getOpcode()) {
1533  default: break;
1534  case SPISD::SELECT_ICC:
1535  case SPISD::SELECT_XCC:
1536  case SPISD::SELECT_FCC:
1537    DAG.ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1538    DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1539    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1540    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1541
1542    // Only known if known in both the LHS and RHS.
1543    KnownOne &= KnownOne2;
1544    KnownZero &= KnownZero2;
1545    break;
1546  }
1547}
1548
1549// Look at LHS/RHS/CC and see if they are a lowered setcc instruction.  If so,
1550// set LHS/RHS to the operands of the original compare and SPCC to its condition.
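// For example, (brcond (setne (select_cc a, b, 1, 0, cc), 0)) can branch
// directly on the original compare of a and b.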
1551static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
1552                             ISD::CondCode CC, unsigned &SPCC) {
1553  if (isa<ConstantSDNode>(RHS) &&
1554      cast<ConstantSDNode>(RHS)->isNullValue() &&
1555      CC == ISD::SETNE &&
1556      (((LHS.getOpcode() == SPISD::SELECT_ICC ||
1557         LHS.getOpcode() == SPISD::SELECT_XCC) &&
1558        LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
1559       (LHS.getOpcode() == SPISD::SELECT_FCC &&
1560        LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
1561      isa<ConstantSDNode>(LHS.getOperand(0)) &&
1562      isa<ConstantSDNode>(LHS.getOperand(1)) &&
1563      cast<ConstantSDNode>(LHS.getOperand(0))->isOne() &&
1564      cast<ConstantSDNode>(LHS.getOperand(1))->isNullValue()) {
1565    SDValue CMPCC = LHS.getOperand(3);
1566    SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
1567    LHS = CMPCC.getOperand(0);
1568    RHS = CMPCC.getOperand(1);
1569  }
1570}
1571
1572// Convert to a target node and set target flags.
1573SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
1574                                             SelectionDAG &DAG) const {
1575  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
1576    return DAG.getTargetGlobalAddress(GA->getGlobal(),
1577                                      SDLoc(GA),
1578                                      GA->getValueType(0),
1579                                      GA->getOffset(), TF);
1580
1581  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
1582    return DAG.getTargetConstantPool(CP->getConstVal(),
1583                                     CP->getValueType(0),
1584                                     CP->getAlignment(),
1585                                     CP->getOffset(), TF);
1586
1587  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
1588    return DAG.getTargetBlockAddress(BA->getBlockAddress(),
1589                                     Op.getValueType(),
1590                                     0,
1591                                     TF);
1592
1593  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
1594    return DAG.getTargetExternalSymbol(ES->getSymbol(),
1595                                       ES->getValueType(0), TF);
1596
1597  llvm_unreachable("Unhandled address SDNode");
1598}
1599
1600// Split Op into high and low parts according to HiTF and LoTF.
1601// Return an ADD node combining the parts.
1602SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
1603                                          unsigned HiTF, unsigned LoTF,
1604                                          SelectionDAG &DAG) const {
1605  SDLoc DL(Op);
1606  EVT VT = Op.getValueType();
1607  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
1608  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
1609  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
1610}
1611
1612// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
1613// or ExternalSymbol SDNode.
1614SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
1615  SDLoc DL(Op);
1616  EVT VT = getPointerTy();
1617
1618  // Handle PIC mode first.
1619  if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
1620    // This is the pic32 code model; the GOT is known to be smaller than 4GB.
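    // Compute %hi/%lo of the GOT offset, add the GOT base register, and load
    // the symbol's real address from that GOT entry.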
1621    SDValue HiLo = makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG);
1622    SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
1623    SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo);
1624    return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
1625                       MachinePointerInfo::getGOT(), false, false, false, 0);
1626  }
1627
1628  // This is one of the absolute code models.
1629  switch(getTargetMachine().getCodeModel()) {
1630  default:
1631    llvm_unreachable("Unsupported absolute code model");
1632  case CodeModel::Small:
1633    // abs32.
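    // Emits roughly: sethi %hi(sym), %reg ; or %reg, %lo(sym), %reg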
1634    return makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG);
1635  case CodeModel::Medium: {
1636    // abs44.
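    // Emits roughly:
    //   sethi %h44(sym), %reg ; or %reg, %m44(sym), %reg
    //   sllx  %reg, 12, %reg  ; or %reg, %l44(sym), %reg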
1637    SDValue H44 = makeHiLoPair(Op, SPII::MO_H44, SPII::MO_M44, DAG);
1638    H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, MVT::i32));
1639    SDValue L44 = withTargetFlags(Op, SPII::MO_L44, DAG);
1640    L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
1641    return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
1642  }
1643  case CodeModel::Large: {
1644    // abs64.
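    // Emits roughly:
    //   sethi %hh(sym), %t1 ; or %t1, %hm(sym), %t1 ; sllx %t1, 32, %t1
    //   sethi %hi(sym), %t2 ; or %t2, %lo(sym), %t2 ; add  %t1, %t2, %reg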
1645    SDValue Hi = makeHiLoPair(Op, SPII::MO_HH, SPII::MO_HM, DAG);
1646    Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, MVT::i32));
1647    SDValue Lo = makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG);
1648    return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
1649  }
1650  }
1651}
1652
1653SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
1654                                                SelectionDAG &DAG) const {
1655  return makeAddress(Op, DAG);
1656}
1657
1658SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
1659                                               SelectionDAG &DAG) const {
1660  return makeAddress(Op, DAG);
1661}
1662
1663SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
1664                                               SelectionDAG &DAG) const {
1665  return makeAddress(Op, DAG);
1666}
1667
1668SDValue
1669SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args,
1670                                          SDValue Arg, SDLoc DL,
1671                                          SelectionDAG &DAG) const {
1672  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
1673  EVT ArgVT = Arg.getValueType();
1674  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
1675
1676  ArgListEntry Entry;
1677  Entry.Node = Arg;
1678  Entry.Ty   = ArgTy;
1679
1680  if (ArgTy->isFP128Ty()) {
1681    // Create a stack object and pass the pointer to the library function.
1682    int FI = MFI->CreateStackObject(16, 8, false);
1683    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
1684    Chain = DAG.getStore(Chain,
1685                         DL,
1686                         Entry.Node,
1687                         FIPtr,
1688                         MachinePointerInfo(),
1689                         false,
1690                         false,
1691                         8);
1692
1693    Entry.Node = FIPtr;
1694    Entry.Ty   = PointerType::getUnqual(ArgTy);
1695  }
1696  Args.push_back(Entry);
1697  return Chain;
1698}
1699
1700SDValue
1701SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
1702                                 const char *LibFuncName,
1703                                 unsigned numArgs) const {
1704
1705  ArgListTy Args;
1706
1707  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
1708
1709  SDValue Callee = DAG.getExternalSymbol(LibFuncName, getPointerTy());
1710  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
1711  Type *RetTyABI = RetTy;
1712  SDValue Chain = DAG.getEntryNode();
1713  SDValue RetPtr;
1714
1715  if (RetTy->isFP128Ty()) {
1716    // Create a Stack Object to receive the return value of type f128.
1717    ArgListEntry Entry;
1718    int RetFI = MFI->CreateStackObject(16, 8, false);
1719    RetPtr = DAG.getFrameIndex(RetFI, getPointerTy());
1720    Entry.Node = RetPtr;
1721    Entry.Ty   = PointerType::getUnqual(RetTy);
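    // On 32-bit targets the _Q_* routines return the f128 result through a
    // hidden sret pointer; the 64-bit _Qp_* routines take the result pointer
    // as an ordinary leading argument.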
1722    if (!Subtarget->is64Bit())
1723      Entry.isSRet = true;
1724    Entry.isReturned = false;
1725    Args.push_back(Entry);
1726    RetTyABI = Type::getVoidTy(*DAG.getContext());
1727  }
1728
1729  assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
1730  for (unsigned i = 0, e = numArgs; i != e; ++i) {
1731    Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
1732  }
1733  TargetLowering::
1734    CallLoweringInfo CLI(Chain,
1735                         RetTyABI,
1736                         false, false, false, false,
1737                         0, CallingConv::C,
1738                         false, false, true,
1739                         Callee, Args, DAG, SDLoc(Op));
1740  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
1741
1742  // chain is in second result.
1743  if (RetTyABI == RetTy)
1744    return CallInfo.first;
1745
1746  assert (RetTy->isFP128Ty() && "Unexpected return type!");
1747
1748  Chain = CallInfo.second;
1749
1750  // Load RetPtr to get the return value.
1751  return DAG.getLoad(Op.getValueType(),
1752                     SDLoc(Op),
1753                     Chain,
1754                     RetPtr,
1755                     MachinePointerInfo(),
1756                     false, false, false, 8);
1757}
1758
1759SDValue
1760SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
1761                                      unsigned &SPCC,
1762                                      SDLoc DL,
1763                                      SelectionDAG &DAG) const {
1764
1765  const char *LibCall = 0;
1766  bool is64Bit = Subtarget->is64Bit();
1767  switch(SPCC) {
1768  default: llvm_unreachable("Unhandled conditional code!");
1769  case SPCC::FCC_E  : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
1770  case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
1771  case SPCC::FCC_L  : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
1772  case SPCC::FCC_G  : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
1773  case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
1774  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
1775  case SPCC::FCC_UL :
1776  case SPCC::FCC_ULE:
1777  case SPCC::FCC_UG :
1778  case SPCC::FCC_UGE:
1779  case SPCC::FCC_U  :
1780  case SPCC::FCC_O  :
1781  case SPCC::FCC_LG :
1782  case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
1783  }
1784
1785  SDValue Callee = DAG.getExternalSymbol(LibCall, getPointerTy());
1786  Type *RetTy = Type::getInt32Ty(*DAG.getContext());
1787  ArgListTy Args;
1788  SDValue Chain = DAG.getEntryNode();
1789  Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
1790  Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
1791
1792  TargetLowering::
1793    CallLoweringInfo CLI(Chain,
1794                         RetTy,
1795                         false, false, false, false,
1796                         0, CallingConv::C,
1797                         false, false, true,
1798                         Callee, Args, DAG, DL);
1799
1800  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
1801
1802  // The comparison result is in the first return value; the chain is second.
1803  SDValue Result = CallInfo.first;
1804
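  // The boolean _Q_f*/_Qp_f* routines return nonzero when the relation holds,
  // while _Q_cmp/_Qp_cmp is assumed to return 0 = equal, 1 = less, 2 = greater
  // and 3 = unordered. Map the libcall result back onto an integer compare.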
1805  switch(SPCC) {
1806  default: {
1807    SDValue RHS = DAG.getTargetConstant(0, Result.getValueType());
1808    SPCC = SPCC::ICC_NE;
1809    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
1810  }
1811  case SPCC::FCC_UL : {
1812    SDValue Mask   = DAG.getTargetConstant(1, Result.getValueType());
1813    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
1814    SDValue RHS    = DAG.getTargetConstant(0, Result.getValueType());
1815    SPCC = SPCC::ICC_NE;
1816    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
1817  }
1818  case SPCC::FCC_ULE: {
1819    SDValue RHS = DAG.getTargetConstant(2, Result.getValueType());
1820    SPCC = SPCC::ICC_NE;
1821    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
1822  }
1823  case SPCC::FCC_UG :  {
1824    SDValue RHS = DAG.getTargetConstant(1, Result.getValueType());
1825    SPCC = SPCC::ICC_G;
1826    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
1827  }
1828  case SPCC::FCC_UGE: {
1829    SDValue RHS = DAG.getTargetConstant(1, Result.getValueType());
1830    SPCC = SPCC::ICC_NE;
1831    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
1832  }
1833
1834  case SPCC::FCC_U  :  {
1835    SDValue RHS = DAG.getTargetConstant(3, Result.getValueType());
1836    SPCC = SPCC::ICC_E;
1837    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
1838  }
1839  case SPCC::FCC_O  :  {
1840    SDValue RHS = DAG.getTargetConstant(3, Result.getValueType());
1841    SPCC = SPCC::ICC_NE;
1842    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
1843  }
1844  case SPCC::FCC_LG :  {
1845    SDValue Mask   = DAG.getTargetConstant(3, Result.getValueType());
1846    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
1847    SDValue RHS    = DAG.getTargetConstant(0, Result.getValueType());
1848    SPCC = SPCC::ICC_NE;
1849    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
1850  }
1851  case SPCC::FCC_UE : {
1852    SDValue Mask   = DAG.getTargetConstant(3, Result.getValueType());
1853    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
1854    SDValue RHS    = DAG.getTargetConstant(0, Result.getValueType());
1855    SPCC = SPCC::ICC_E;
1856    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
1857  }
1858  }
1859}
1860
1861static SDValue
1862LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
1863                   const SparcTargetLowering &TLI) {
1864
1865  if (Op.getOperand(0).getValueType() == MVT::f64)
1866    return TLI.LowerF128Op(Op, DAG,
1867                           TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
1868
1869  if (Op.getOperand(0).getValueType() == MVT::f32)
1870    return TLI.LowerF128Op(Op, DAG,
1871                           TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
1872
1873  llvm_unreachable("fpextend with non-float operand!");
1874  return SDValue(0, 0);
1875}
1876
1877static SDValue
1878LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
1879                  const SparcTargetLowering &TLI) {
1880  // FP_ROUND on f64 and f32 are legal.
1881  if (Op.getOperand(0).getValueType() != MVT::f128)
1882    return Op;
1883
1884  if (Op.getValueType() == MVT::f64)
1885    return TLI.LowerF128Op(Op, DAG,
1886                           TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
1887  if (Op.getValueType() == MVT::f32)
1888    return TLI.LowerF128Op(Op, DAG,
1889                           TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
1890
1891  llvm_unreachable("fpround to non-float!");
1892  return SDValue(0, 0);
1893}
1894
1895static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
1896                               const SparcTargetLowering &TLI,
1897                               bool hasHardQuad) {
1898  SDLoc dl(Op);
1899  // Convert the fp value to integer in an FP register.
1900  assert(Op.getValueType() == MVT::i32);
1901
1902  if (Op.getOperand(0).getValueType() == MVT::f128 && !hasHardQuad)
1903    return TLI.LowerF128Op(Op, DAG,
1904                       TLI.getLibcallName(RTLIB::FPTOSINT_F128_I32), 1);
1905
1906  Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
1907  return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
1908}
1909
1910static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
1911                               const SparcTargetLowering &TLI,
1912                               bool hasHardQuad) {
1913  SDLoc dl(Op);
1914  assert(Op.getOperand(0).getValueType() == MVT::i32);
1915  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
1916  // Convert the int value to FP in an FP register.
1917  if (Op.getValueType() == MVT::f128 && !hasHardQuad)
1918    return TLI.LowerF128Op(Op, DAG,
1919                           TLI.getLibcallName(RTLIB::SINTTOFP_I32_F128), 1);
1920  return DAG.getNode(SPISD::ITOF, dl, Op.getValueType(), Tmp);
1921}
1922
1923static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
1924                          const SparcTargetLowering &TLI,
1925                          bool hasHardQuad) {
1926  SDValue Chain = Op.getOperand(0);
1927  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
1928  SDValue LHS = Op.getOperand(2);
1929  SDValue RHS = Op.getOperand(3);
1930  SDValue Dest = Op.getOperand(4);
1931  SDLoc dl(Op);
1932  unsigned Opc, SPCC = ~0U;
1933
1934  // If this is a br_cc of a "setcc", and if the setcc got lowered into
1935  // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
1936  LookThroughSetCC(LHS, RHS, CC, SPCC);
1937
1938  // Get the condition flag.
1939  SDValue CompareFlag;
1940  if (LHS.getValueType().isInteger()) {
1941    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
1942    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
1943    // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
1944    Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
1945  } else {
1946    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
1947      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
1948      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
1949      Opc = SPISD::BRICC;
1950    } else {
1951      CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
1952      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
1953      Opc = SPISD::BRFCC;
1954    }
1955  }
1956  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
1957                     DAG.getConstant(SPCC, MVT::i32), CompareFlag);
1958}
1959
1960static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
1961                              const SparcTargetLowering &TLI,
1962                              bool hasHardQuad) {
1963  SDValue LHS = Op.getOperand(0);
1964  SDValue RHS = Op.getOperand(1);
1965  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
1966  SDValue TrueVal = Op.getOperand(2);
1967  SDValue FalseVal = Op.getOperand(3);
1968  SDLoc dl(Op);
1969  unsigned Opc, SPCC = ~0U;
1970
1971  // If this is a select_cc of a "setcc", and if the setcc got lowered into
1972  // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
1973  LookThroughSetCC(LHS, RHS, CC, SPCC);
1974
1975  SDValue CompareFlag;
1976  if (LHS.getValueType().isInteger()) {
1977    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
1978    Opc = LHS.getValueType() == MVT::i32 ?
1979          SPISD::SELECT_ICC : SPISD::SELECT_XCC;
1980    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
1981  } else {
1982    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
1983      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
1984      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
1985      Opc = SPISD::SELECT_ICC;
1986    } else {
1987      CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
1988      Opc = SPISD::SELECT_FCC;
1989      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
1990    }
1991  }
1992  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
1993                     DAG.getConstant(SPCC, MVT::i32), CompareFlag);
1994}
1995
1996static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
1997                            const SparcTargetLowering &TLI) {
1998  MachineFunction &MF = DAG.getMachineFunction();
1999  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
2000
2001  // Need frame address to find the address of VarArgsFrameIndex.
2002  MF.getFrameInfo()->setFrameAddressIsTaken(true);
2003
2004  // vastart just stores the address of the VarArgsFrameIndex slot into the
2005  // memory location argument.
2006  SDLoc DL(Op);
2007  SDValue Offset =
2008    DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(),
2009                DAG.getRegister(SP::I6, TLI.getPointerTy()),
2010                DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset()));
2011  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2012  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2013                      MachinePointerInfo(SV), false, false, 0);
2014}
2015
2016static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
2017  SDNode *Node = Op.getNode();
2018  EVT VT = Node->getValueType(0);
2019  SDValue InChain = Node->getOperand(0);
2020  SDValue VAListPtr = Node->getOperand(1);
2021  EVT PtrVT = VAListPtr.getValueType();
2022  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2023  SDLoc DL(Node);
2024  SDValue VAList = DAG.getLoad(PtrVT, DL, InChain, VAListPtr,
2025                               MachinePointerInfo(SV), false, false, false, 0);
2026  // Increment the pointer, VAList, to the next vaarg.
2027  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2028                                DAG.getIntPtrConstant(VT.getSizeInBits()/8));
2029  // Store the incremented VAList to the legalized pointer.
2030  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr,
2031                         VAListPtr, MachinePointerInfo(SV), false, false, 0);
2032  // Load the actual argument out of the pointer VAList.
2033  // We can't count on greater alignment than the word size.
2034  return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(),
2035                     false, false, false,
2036                     std::min(PtrVT.getSizeInBits(), VT.getSizeInBits())/8);
2037}
2038
2039static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) {
2040  SDValue Chain = Op.getOperand(0);  // Legalize the chain.
2041  SDValue Size  = Op.getOperand(1);  // Legalize the size.
2042  SDLoc dl(Op);
2043
2044  unsigned SPReg = SP::O6;
2045  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);
2046  SDValue NewSP = DAG.getNode(ISD::SUB, dl, MVT::i32, SP, Size); // Value
2047  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP);    // Output chain
2048
2049  // The resultant pointer skips the 96-byte register window save and argument
2050  // build area that the 32-bit SPARC ABI reserves at the bottom of every frame.
2051  SDValue NewVal = DAG.getNode(ISD::ADD, dl, MVT::i32, NewSP,
2052                                 DAG.getConstant(96, MVT::i32));
2053  SDValue Ops[2] = { NewVal, Chain };
2054  return DAG.getMergeValues(Ops, 2, dl);
2055}
2056
2057
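// Emit an SPISD::FLUSHW node, which flushes the in-use register windows to the
// stack so that saved frame pointers and return addresses of parent frames can
// be read back from memory.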
2058static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
2059  SDLoc dl(Op);
2060  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2061                              dl, MVT::Other, DAG.getEntryNode());
2062  return Chain;
2063}
2064
2065static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
2066  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
2067  MFI->setFrameAddressIsTaken(true);
2068
2069  EVT VT = Op.getValueType();
2070  SDLoc dl(Op);
2071  unsigned FrameReg = SP::I6;
2072
2073  uint64_t depth = Op.getConstantOperandVal(0);
2074
2075  SDValue FrameAddr;
2076  if (depth == 0)
2077    FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
2078  else {
2079    // Flush first to make sure the windowed registers' values are on the stack.
2080    SDValue Chain = getFLUSHW(Op, DAG);
2081    FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2082
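    // In the 32-bit SPARC register window save area the caller's frame pointer
    // (%i6) is spilled at offset 56; follow that chain 'depth' times.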
2083    for (uint64_t i = 0; i != depth; ++i) {
2084      SDValue Ptr = DAG.getNode(ISD::ADD,
2085                                dl, MVT::i32,
2086                                FrameAddr, DAG.getIntPtrConstant(56));
2087      FrameAddr = DAG.getLoad(MVT::i32, dl,
2088                              Chain,
2089                              Ptr,
2090                              MachinePointerInfo(), false, false, false, 0);
2091    }
2092  }
2093  return FrameAddr;
2094}
2095
2096static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
2097                               const SparcTargetLowering &TLI) {
2098  MachineFunction &MF = DAG.getMachineFunction();
2099  MachineFrameInfo *MFI = MF.getFrameInfo();
2100  MFI->setReturnAddressIsTaken(true);
2101
2102  EVT VT = Op.getValueType();
2103  SDLoc dl(Op);
2104  uint64_t depth = Op.getConstantOperandVal(0);
2105
2106  SDValue RetAddr;
2107  if (depth == 0) {
2108    unsigned RetReg = MF.addLiveIn(SP::I7,
2109                                   TLI.getRegClassFor(TLI.getPointerTy()));
2110    RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2111  } else {
2112    // Need frame address to find return address of the caller.
2113    MFI->setFrameAddressIsTaken(true);
2114
2115    // Flush first to make sure the windowed registers' values are on the stack.
2116    SDValue Chain = getFLUSHW(Op, DAG);
2117    RetAddr = DAG.getCopyFromReg(Chain, dl, SP::I6, VT);
2118
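    // Walk the saved frame pointers at offset 56; the return address (%i7) of
    // the final frame is spilled at offset 60.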
2119    for (uint64_t i = 0; i != depth; ++i) {
2120      SDValue Ptr = DAG.getNode(ISD::ADD,
2121                                dl, MVT::i32,
2122                                RetAddr,
2123                                DAG.getIntPtrConstant((i == depth-1)?60:56));
2124      RetAddr = DAG.getLoad(MVT::i32, dl,
2125                            Chain,
2126                            Ptr,
2127                            MachinePointerInfo(), false, false, false, 0);
2128    }
2129  }
2130  return RetAddr;
2131}
2132
2133static SDValue LowerF64Op(SDValue Op, SelectionDAG &DAG)
2134{
2135  SDLoc dl(Op);
2136
2137  assert(Op.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2138  assert(Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS);
2139
2140  // Lower fneg/fabs on f64 to fneg/fabs on f32.
2141  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2142  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2143
2144  SDValue SrcReg64 = Op.getOperand(0);
2145  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2146                                            SrcReg64);
2147  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2148                                            SrcReg64);
2149
2150  Hi32 = DAG.getNode(Op.getOpcode(), dl, MVT::f32, Hi32);
2151
2152  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2153                                                dl, MVT::f64), 0);
2154  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2155                                       DstReg64, Hi32);
2156  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2157                                       DstReg64, Lo32);
2158  return DstReg64;
2159}
2160
2161// Lower a f128 load into two f64 loads.
2162static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
2163{
2164  SDLoc dl(Op);
2165  LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
2166  assert(LdNode && LdNode->getOffset().getOpcode() == ISD::UNDEF
2167         && "Unexpected node type");
2168
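  // SPARC is big-endian, so the even (high) double of the quad value is at
  // offset 0 and the odd (low) double at offset 8.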
2169  SDValue Hi64 = DAG.getLoad(MVT::f64,
2170                             dl,
2171                             LdNode->getChain(),
2172                             LdNode->getBasePtr(),
2173                             LdNode->getPointerInfo(),
2174                             false, false, false, 8);
2175  EVT addrVT = LdNode->getBasePtr().getValueType();
2176  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2177                              LdNode->getBasePtr(),
2178                              DAG.getConstant(8, addrVT));
2179  SDValue Lo64 = DAG.getLoad(MVT::f64,
2180                             dl,
2181                             LdNode->getChain(),
2182                             LoPtr,
2183                             LdNode->getPointerInfo(),
2184                             false, false, false, 8);
2185
2186  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, MVT::i32);
2187  SDValue SubRegOdd  = DAG.getTargetConstant(SP::sub_odd64, MVT::i32);
2188
2189  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2190                                       dl, MVT::f128);
2191  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2192                               MVT::f128,
2193                               SDValue(InFP128, 0),
2194                               Hi64,
2195                               SubRegEven);
2196  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2197                               MVT::f128,
2198                               SDValue(InFP128, 0),
2199                               Lo64,
2200                               SubRegOdd);
2201  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2202                           SDValue(Lo64.getNode(), 1) };
2203  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2204                                 &OutChains[0], 2);
2205  SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2206  return DAG.getMergeValues(Ops, 2, dl);
2207}
2208
2209// Lower a f128 store into two f64 stores.
2210static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
2211  SDLoc dl(Op);
2212  StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
2213  assert(StNode && StNode->getOffset().getOpcode() == ISD::UNDEF
2214         && "Unexpected node type");
2215  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, MVT::i32);
2216  SDValue SubRegOdd  = DAG.getTargetConstant(SP::sub_odd64, MVT::i32);
2217
2218  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2219                                    dl,
2220                                    MVT::f64,
2221                                    StNode->getValue(),
2222                                    SubRegEven);
2223  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2224                                    dl,
2225                                    MVT::f64,
2226                                    StNode->getValue(),
2227                                    SubRegOdd);
2228  SDValue OutChains[2];
2229  OutChains[0] = DAG.getStore(StNode->getChain(),
2230                              dl,
2231                              SDValue(Hi64, 0),
2232                              StNode->getBasePtr(),
2233                              MachinePointerInfo(),
2234                              false, false, 8);
2235  EVT addrVT = StNode->getBasePtr().getValueType();
2236  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2237                              StNode->getBasePtr(),
2238                              DAG.getConstant(8, addrVT));
2239  OutChains[1] = DAG.getStore(StNode->getChain(),
2240                             dl,
2241                             SDValue(Lo64, 0),
2242                             LoPtr,
2243                             MachinePointerInfo(),
2244                             false, false, 8);
2245  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2246                     &OutChains[0], 2);
2247}
2248
2249static SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG,
2250                         const SparcTargetLowering &TLI,
2251                         bool is64Bit) {
2252  if (Op.getValueType() == MVT::f64)
2253    return LowerF64Op(Op, DAG);
2254  if (Op.getValueType() == MVT::f128)
2255    return TLI.LowerF128Op(Op, DAG, ((is64Bit) ? "_Qp_neg" : "_Q_neg"), 1);
2256  return Op;
2257}
2258
2259static SDValue LowerFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
2260  if (Op.getValueType() == MVT::f64)
2261    return LowerF64Op(Op, DAG);
2262  if (Op.getValueType() != MVT::f128)
2263    return Op;
2264
2265  // Lower fabs on f128 to fabs on f64
2266  // fabs f128 => fabs f64:sub_even64, fmov f64:sub_odd64
2267
2268  SDLoc dl(Op);
2269  SDValue SrcReg128 = Op.getOperand(0);
2270  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
2271                                            SrcReg128);
2272  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
2273                                            SrcReg128);
2274  if (isV9)
2275    Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
2276  else
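    // Pre-V9 lacks FABSD/FNEGD, so lower the operation on the high f64 half
    // through its f32 subregisters as well.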
2277    Hi64 = LowerF64Op(DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64), DAG);
2278
2279  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2280                                                 dl, MVT::f128), 0);
2281  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
2282                                        DstReg128, Hi64);
2283  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
2284                                        DstReg128, Lo64);
2285  return DstReg128;
2286}
2287
2288
2289
2290SDValue SparcTargetLowering::
2291LowerOperation(SDValue Op, SelectionDAG &DAG) const {
2292
2293  bool hasHardQuad = Subtarget->hasHardQuad();
2294  bool is64Bit     = Subtarget->is64Bit();
2295  bool isV9        = Subtarget->isV9();
2296
2297  switch (Op.getOpcode()) {
2298  default: llvm_unreachable("Should not custom lower this!");
2299
2300  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG, *this);
2301  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
2302  case ISD::GlobalTLSAddress:
2303    llvm_unreachable("TLS not implemented for Sparc.");
2304  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
2305  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
2306  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
2307  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG, *this,
2308                                                       hasHardQuad);
2309  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG, *this,
2310                                                       hasHardQuad);
2311  case ISD::BR_CC:              return LowerBR_CC(Op, DAG, *this,
2312                                                  hasHardQuad);
2313  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG, *this,
2314                                                      hasHardQuad);
2315  case ISD::VASTART:            return LowerVASTART(Op, DAG, *this);
2316  case ISD::VAARG:              return LowerVAARG(Op, DAG);
2317  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
2318
2319  case ISD::LOAD:               return LowerF128Load(Op, DAG);
2320  case ISD::STORE:              return LowerF128Store(Op, DAG);
2321  case ISD::FADD:               return LowerF128Op(Op, DAG,
2322                                       getLibcallName(RTLIB::ADD_F128), 2);
2323  case ISD::FSUB:               return LowerF128Op(Op, DAG,
2324                                       getLibcallName(RTLIB::SUB_F128), 2);
2325  case ISD::FMUL:               return LowerF128Op(Op, DAG,
2326                                       getLibcallName(RTLIB::MUL_F128), 2);
2327  case ISD::FDIV:               return LowerF128Op(Op, DAG,
2328                                       getLibcallName(RTLIB::DIV_F128), 2);
2329  case ISD::FSQRT:              return LowerF128Op(Op, DAG,
2330                                       getLibcallName(RTLIB::SQRT_F128),1);
2331  case ISD::FNEG:               return LowerFNEG(Op, DAG, *this, is64Bit);
2332  case ISD::FABS:               return LowerFABS(Op, DAG, isV9);
2333  case ISD::FP_EXTEND:          return LowerF128_FPEXTEND(Op, DAG, *this);
2334  case ISD::FP_ROUND:           return LowerF128_FPROUND(Op, DAG, *this);
2335  }
2336}
2337
2338MachineBasicBlock *
2339SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
2340                                                 MachineBasicBlock *BB) const {
2341  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
2342  unsigned BROpcode;
2343  unsigned CC;
2344  DebugLoc dl = MI->getDebugLoc();
2345  // Figure out the conditional branch opcode to use for this select_cc.
2346  switch (MI->getOpcode()) {
2347  default: llvm_unreachable("Unknown SELECT_CC!");
2348  case SP::SELECT_CC_Int_ICC:
2349  case SP::SELECT_CC_FP_ICC:
2350  case SP::SELECT_CC_DFP_ICC:
2351  case SP::SELECT_CC_QFP_ICC:
2352    BROpcode = SP::BCOND;
2353    break;
2354  case SP::SELECT_CC_Int_FCC:
2355  case SP::SELECT_CC_FP_FCC:
2356  case SP::SELECT_CC_DFP_FCC:
2357  case SP::SELECT_CC_QFP_FCC:
2358    BROpcode = SP::FBCOND;
2359    break;
2360  }
2361
2362  CC = (SPCC::CondCodes)MI->getOperand(3).getImm();
2363
2364  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
2365  // control-flow pattern.  The incoming instruction knows the destination vreg
2366  // to set, the condition code register to branch on, the true/false values to
2367  // select between, and a branch opcode to use.
2368  const BasicBlock *LLVM_BB = BB->getBasicBlock();
2369  MachineFunction::iterator It = BB;
2370  ++It;
2371
2372  //  thisMBB:
2373  //  ...
2374  //   TrueVal = ...
2375  //   [f]bCC copy1MBB
2376  //   fallthrough --> copy0MBB
2377  MachineBasicBlock *thisMBB = BB;
2378  MachineFunction *F = BB->getParent();
2379  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
2380  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
2381  F->insert(It, copy0MBB);
2382  F->insert(It, sinkMBB);
2383
2384  // Transfer the remainder of BB and its successor edges to sinkMBB.
2385  sinkMBB->splice(sinkMBB->begin(), BB,
2386                  llvm::next(MachineBasicBlock::iterator(MI)),
2387                  BB->end());
2388  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
2389
2390  // Add the true and fallthrough blocks as its successors.
2391  BB->addSuccessor(copy0MBB);
2392  BB->addSuccessor(sinkMBB);
2393
2394  BuildMI(BB, dl, TII.get(BROpcode)).addMBB(sinkMBB).addImm(CC);
2395
2396  //  copy0MBB:
2397  //   %FalseValue = ...
2398  //   # fallthrough to sinkMBB
2399  BB = copy0MBB;
2400
2401  // Update machine-CFG edges
2402  BB->addSuccessor(sinkMBB);
2403
2404  //  sinkMBB:
2405  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
2406  //  ...
2407  BB = sinkMBB;
2408  BuildMI(*BB, BB->begin(), dl, TII.get(SP::PHI), MI->getOperand(0).getReg())
2409    .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
2410    .addReg(MI->getOperand(1).getReg()).addMBB(thisMBB);
2411
2412  MI->eraseFromParent();   // The pseudo instruction is gone now.
2413  return BB;
2414}
2415
2416//===----------------------------------------------------------------------===//
2417//                         Sparc Inline Assembly Support
2418//===----------------------------------------------------------------------===//
2419
2420/// getConstraintType - Given a constraint letter, return the type of
2421/// constraint it is for this target.
2422SparcTargetLowering::ConstraintType
2423SparcTargetLowering::getConstraintType(const std::string &Constraint) const {
2424  if (Constraint.size() == 1) {
2425    switch (Constraint[0]) {
2426    default:  break;
2427    case 'r': return C_RegisterClass;
2428    }
2429  }
2430
2431  return TargetLowering::getConstraintType(Constraint);
2432}
2433
2434std::pair<unsigned, const TargetRegisterClass*>
2435SparcTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
2436                                                  MVT VT) const {
2437  if (Constraint.size() == 1) {
2438    switch (Constraint[0]) {
2439    case 'r':
2440      return std::make_pair(0U, &SP::IntRegsRegClass);
2441    }
2442  }
2443
2444  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
2445}
2446
2447bool
2448SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
2449  // The Sparc target isn't yet aware of offsets.
2450  return false;
2451}
2452