SparcISelLowering.cpp revision 8717679c449db5555ec0ce2873bbbe53106f4c88
//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "SparcISelLowering.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcTargetMachine.h"
#include "MCTargetDesc/SparcBaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;


//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  assert (ArgFlags.isSRet());

  // Assign SRet argument.
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                         0,
                                         LocVT, LocInfo));
  return true;
}

static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
                                MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const uint16_t RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get first reg.
  if (unsigned Reg = State.AllocateReg(RegList, 6)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    // Assign whole thing in stack.
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(8,4),
                                           LocVT, LocInfo));
    return true;
  }

  // Try to get second reg.
  if (unsigned Reg = State.AllocateReg(RegList, 6))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(4,4),
                                           LocVT, LocInfo));
  return true;
}

// Allocate a full-sized argument for the 64-bit ABI.
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");

  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
  unsigned Offset = State.AllocateStack(8, 8);
  unsigned Reg = 0;

  if (LocVT == MVT::i64 && Offset < 6*8)
    // Promote integers to %i0-%i5.
    Reg = SP::I0 + Offset/8;
  else if (LocVT == MVT::f64 && Offset < 16*8)
    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
    Reg = SP::D0 + Offset/8;
  else if (LocVT == MVT::f32 && Offset < 16*8)
    // Promote floats to %f1, %f3, ...
    Reg = SP::F1 + Offset/4;

  // Promote to register when possible, otherwise use the stack slot.
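  // For example, the arithmetic above maps the fixed 8-byte argument slots to
  // registers as follows:
  //   offset  0 -> %i0 / %d0 / %f1
  //   offset  8 -> %i1 / %d1 / %f3
  //   offset 16 -> %i2 / %d2 / %f5
  //   ...
  //   offset 40 -> %i5 / %d5 / %f11
  // Integer arguments past offset 40 and FP arguments past offset 120 fall
  // through to the stack handling below.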
  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
  if (LocVT == MVT::f32)
    Offset += 4;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, 4);

  if (LocVT == MVT::f32 && Offset < 16*8) {
    // Promote floats to %f0-%f31.
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
                                     LocVT, LocInfo));
    return true;
  }

  if (LocVT == MVT::i32 && Offset < 6*8) {
    // Promote integers to %i0-%i5, using half the register.
    unsigned Reg = SP::I0 + Offset/8;
    LocVT = MVT::i64;
    LocInfo = CCValAssign::AExt;

    // Set the Custom bit if this i32 goes in the high bits of a register.
    if (Offset % 8 == 0)
      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
                                             LocVT, LocInfo));
    else
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

#include "SparcGenCallingConv.inc"

// The calling conventions in SparcCallingConv.td are described in terms of the
// callee's register window. This function translates registers to the
// corresponding caller window %o register.
static unsigned toCallerWindow(unsigned Reg) {
  assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7 && "Unexpected enum");
  if (Reg >= SP::I0 && Reg <= SP::I7)
    return Reg - SP::I0 + SP::O0;
  return Reg;
}

SDValue
SparcTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 SDLoc DL, SelectionDAG &DAG) const {
  if (Subtarget->is64Bit())
    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}

SDValue
SparcTargetLowering::LowerReturn_32(SDValue Chain,
                                    CallingConv::ID CallConv, bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 DAG.getTarget(), RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Make room for the return address offset.
  RetOps.push_back(SDValue());

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(),
                             OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
  // If the function returns a struct, copy the SRetReturnReg to I0.
  if (MF.getFunction()->hasStructRetAttr()) {
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    unsigned Reg = SFI->getSRetReturnReg();
    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(SP::I0, getPointerTy()));
    RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
  }

  RetOps[0] = Chain;  // Update chain.
  RetOps[1] = DAG.getConstant(RetAddrOffset, MVT::i32);

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other,
                     &RetOps[0], RetOps.size());
}

// Lower return values for the 64-bit ABI.
// Return values are passed exactly the same way as function arguments.
SDValue
SparcTargetLowering::LowerReturn_64(SDValue Chain,
                                    CallingConv::ID CallConv, bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    SDLoc DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 DAG.getTarget(), RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, CC_Sparc64);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // The second operand on the return instruction is the return address offset.
  // The return address is always %i7+8 with the 64-bit ABI.
  RetOps.push_back(DAG.getConstant(8, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
    default:
      break;
    }

    // The custom bit on an i32 return value indicates that it should be passed
    // in the high bits of the register.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
                           DAG.getConstant(32, MVT::i32));

      // The next value may go in the low bits of the same register.
      // Handle both at once.
      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
        // Skip the next value, it's already done.
        ++i;
      }
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other,
                     &RetOps[0], RetOps.size());
}

SDValue SparcTargetLowering::
LowerFormalArguments(SDValue Chain,
                     CallingConv::ID CallConv,
                     bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL,
                     SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
                                   DL, DAG, InVals);
  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
                                 DL, DAG, InVals);
}

/// LowerFormalArguments_32 - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values. TODO: we should
/// pass FP values in FP registers for fastcc functions.
SDValue SparcTargetLowering::
LowerFormalArguments_32(SDValue Chain,
                        CallingConv::ID CallConv,
                        bool isVarArg,
                        const SmallVectorImpl<ISD::InputArg> &Ins,
                        SDLoc dl,
                        SelectionDAG &DAG,
                        SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

  const unsigned StackOffset = 92;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    if (i == 0 && Ins[i].Flags.isSRet()) {
      // Get SRet from [%fp+64].
      int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, 64, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
      SDValue Arg = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
                                MachinePointerInfo(),
                                false, false, false, 0);
      InVals.push_back(Arg);
      continue;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom()) {
        assert(VA.getLocVT() == MVT::f64);
        unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
        SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);

        assert(i+1 < e);
        CCValAssign &NextVA = ArgLocs[++i];

        SDValue LoVal;
        if (NextVA.isMemLoc()) {
          int FrameIdx = MF.getFrameInfo()->
            CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(), true);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
                              MachinePointerInfo(),
                              false, false, false, 0);
        } else {
          unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
                                        &SP::IntRegsRegClass);
          LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
        }
        SDValue WholeValue =
          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
        WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue);
        InVals.push_back(WholeValue);
        continue;
      }
      unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
      SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      if (VA.getLocVT() == MVT::f32)
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
      else if (VA.getLocVT() != MVT::i32) {
        Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
                          DAG.getValueType(VA.getLocVT()));
        Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
      }
      InVals.push_back(Arg);
      continue;
    }

    assert(VA.isMemLoc());

    unsigned Offset = VA.getLocMemOffset()+StackOffset;

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::f64);
      // If it is double-word aligned, just load.
      if (Offset % 8 == 0) {
        int FI = MF.getFrameInfo()->CreateFixedObject(8,
                                                      Offset,
                                                      true);
        SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
        SDValue Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
                                   MachinePointerInfo(),
                                   false, false, false, 0);
        InVals.push_back(Load);
        continue;
      }

      int FI = MF.getFrameInfo()->CreateFixedObject(4,
                                                    Offset,
                                                    true);
      SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
      SDValue HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
                                  MachinePointerInfo(),
                                  false, false, false, 0);
      int FI2 = MF.getFrameInfo()->CreateFixedObject(4,
                                                     Offset+4,
                                                     true);
      SDValue FIPtr2 = DAG.getFrameIndex(FI2, getPointerTy());

      SDValue LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr2,
                                  MachinePointerInfo(),
                                  false, false, false, 0);

      SDValue WholeValue =
        DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
      WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue);
      InVals.push_back(WholeValue);
      continue;
    }

    int FI = MF.getFrameInfo()->CreateFixedObject(4,
                                                  Offset,
                                                  true);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
    SDValue Load;
    if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
      Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
                         MachinePointerInfo(),
                         false, false, false, 0);
    } else {
      ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
      // Sparc is big endian, so add an offset based on the ObjectVT.
      unsigned Offset = 4-std::max(1U, VA.getValVT().getSizeInBits()/8);
      FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
                          DAG.getConstant(Offset, MVT::i32));
      Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr,
                            MachinePointerInfo(),
                            VA.getValVT(), false, false, 0);
      Load = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Load);
    }
    InVals.push_back(Load);
  }

  if (MF.getFunction()->hasStructRetAttr()) {
    // Copy the SRet Argument to SRetReturnReg.
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    unsigned Reg = SFI->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
      SFI->setSRetReturnReg(Reg);
    }
    SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (isVarArg) {
    static const uint16_t ArgRegs[] = {
      SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
    };
    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs, 6);
    const uint16_t *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
    unsigned ArgOffset = CCInfo.getNextStackOffset();
    if (NumAllocated == 6)
      ArgOffset += StackOffset;
    else {
      assert(!ArgOffset);
      ArgOffset = 68+4*NumAllocated;
    }

    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);

    std::vector<SDValue> OutChains;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
                                                          true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr,
                                       MachinePointerInfo(),
                                       false, false, 0));
      ArgOffset += 4;
    }

    if (!OutChains.empty()) {
      OutChains.push_back(Chain);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                          &OutChains[0], OutChains.size());
    }
  }

  return Chain;
}

// Lower formal arguments for the 64-bit ABI.
SDValue SparcTargetLowering::
LowerFormalArguments_64(SDValue Chain,
                        CallingConv::ID CallConv,
                        bool IsVarArg,
                        const SmallVectorImpl<ISD::InputArg> &Ins,
                        SDLoc DL,
                        SelectionDAG &DAG,
                        SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze arguments according to CC_Sparc64.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);

  // The argument array begins at %fp+BIAS+128, after the register save area.
  const unsigned ArgArea = 128;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (VA.isRegLoc()) {
      // This argument is passed in a register.
      // All integer register arguments are promoted by the caller to i64.

      // Create a virtual register for the promoted live-in value.
      unsigned VReg = MF.addLiveIn(VA.getLocReg(),
                                   getRegClassFor(VA.getLocVT()));
      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());

      // Get the high bits for i32 struct elements.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom())
        Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
                          DAG.getConstant(32, MVT::i32));

      // The caller promoted the argument, so insert an Assert?ext SDNode so we
      // won't promote the value again in this function.
      switch (VA.getLocInfo()) {
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      default:
        break;
      }

      // Truncate the register down to the argument type.
      if (VA.isExtInLoc())
        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

      InVals.push_back(Arg);
      continue;
    }

    // The registers are exhausted. This argument was passed on the stack.
    assert(VA.isMemLoc());
    // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
    // beginning of the arguments area at %fp+BIAS+128.
    unsigned Offset = VA.getLocMemOffset() + ArgArea;
    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
    // Adjust offset for extended arguments, SPARC is big-endian.
    // The caller will have written the full slot with extended bytes, but we
    // prefer our own extending loads.
    if (VA.isExtInLoc())
      Offset += 8 - ValSize;
    int FI = MF.getFrameInfo()->CreateFixedObject(ValSize, Offset, true);
    InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain,
                                 DAG.getFrameIndex(FI, getPointerTy()),
                                 MachinePointerInfo::getFixedStack(FI),
                                 false, false, false, 0));
  }

  if (!IsVarArg)
    return Chain;

  // This function takes variable arguments, some of which may have been passed
  // in registers %i0-%i5. Variable floating point arguments are never passed
  // in floating point registers. They go on %i0-%i5 or on the stack like
  // integer arguments.
  //
  // The va_start intrinsic needs to know the offset to the first variable
  // argument.
  unsigned ArgOffset = CCInfo.getNextStackOffset();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  // Skip the 128 bytes of register save area.
  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
                                  Subtarget->getStackPointerBias());

  // Save the variable arguments that were passed in registers.
  // The caller is required to reserve stack space for 6 arguments regardless
  // of how many arguments were actually passed.
  SmallVector<SDValue, 8> OutChains;
  for (; ArgOffset < 6*8; ArgOffset += 8) {
    unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
    SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
    int FI = MF.getFrameInfo()->CreateFixedObject(8, ArgOffset + ArgArea, true);
    OutChains.push_back(DAG.getStore(Chain, DL, VArg,
                                     DAG.getFrameIndex(FI, getPointerTy()),
                                     MachinePointerInfo::getFixedStack(FI),
                                     false, false, 0));
  }

  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                        &OutChains[0], OutChains.size());

  return Chain;
}

SDValue
SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerCall_64(CLI, InVals);
  return LowerCall_32(CLI, InVals);
}

// Lower a call for the 32-bit ABI.
SDValue
SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // Sparc target does not yet support tail call optimization.
  isTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 DAG.getTarget(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getNextStackOffset();

  // Keep stack frames 8-byte aligned.
  ArgsSize = (ArgsSize+7) & ~7;

  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    unsigned Align = Flags.getByValAlign();

    int FI = MFI->CreateStackObject(Size, Align, false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
    SDValue SizeNode = DAG.getConstant(Size, MVT::i32);

    Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
                          false,        // isVolatile,
                          (Size <= 32), // AlwaysInline if size <= 32
                          MachinePointerInfo(), MachinePointerInfo());
    ByValArgs.push_back(FIPtr);
  }

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true),
                               dl);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const unsigned StackOffset = 92;
  bool hasStructRetAttr = false;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];

    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Use the local copy if it is a byval arg.
    if (Flags.isByVal())
      Arg = ByValArgs[byvalArgIdx++];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (Flags.isSRet()) {
      assert(VA.needsCustom());
      // Store the SRet argument at %sp+64.
      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
      SDValue PtrOff = DAG.getIntPtrConstant(64);
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                         MachinePointerInfo(),
                                         false, false, 0));
      hasStructRetAttr = true;
      continue;
    }

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::f64);

      if (VA.isMemLoc()) {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // If it is double-word aligned, just store.
        if (Offset % 8 == 0) {
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                             MachinePointerInfo(),
                                             false, false, 0));
          continue;
        }
      }

      SDValue StackPtr = DAG.CreateStackTemporary(MVT::f64, MVT::i32);
      SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
                                   Arg, StackPtr, MachinePointerInfo(),
                                   false, false, 0);
      // Sparc is big-endian, so the high part comes first.
      SDValue Hi = DAG.getLoad(MVT::i32, dl, Store, StackPtr,
                               MachinePointerInfo(), false, false, false, 0);
      // Increment the pointer to the other half.
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                             DAG.getIntPtrConstant(4));
      // Load the low part.
      SDValue Lo = DAG.getLoad(MVT::i32, dl, Store, StackPtr,
                               MachinePointerInfo(), false, false, false, 0);

      if (VA.isRegLoc()) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Hi));
        assert(i+1 != e);
        CCValAssign &NextVA = ArgLocs[++i];
        if (NextVA.isRegLoc()) {
          RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Lo));
        } else {
          // Store the low part on the stack.
          unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(DAG.getStore(Chain, dl, Lo, PtrOff,
                                             MachinePointerInfo(),
                                             false, false, 0));
        }
      } else {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // Store the high part.
        SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
        SDValue PtrOff = DAG.getIntPtrConstant(Offset);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getStore(Chain, dl, Hi, PtrOff,
                                           MachinePointerInfo(),
                                           false, false, 0));
        // Store the low part.
        PtrOff = DAG.getIntPtrConstant(Offset+4);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getStore(Chain, dl, Lo, PtrOff,
                                           MachinePointerInfo(),
                                           false, false, 0));
      }
      continue;
    }

    // Arguments that can be passed in registers must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      if (VA.getLocVT() != MVT::f32) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
        continue;
      }
      Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset()+StackOffset);
    PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
    MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                       MachinePointerInfo(),
                                       false, false, 0));
  }


  // Emit all stores, making sure they occur before any copies into physregs.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    unsigned Reg = toCallerWindow(RegsToPass[i].first);
    Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  unsigned SRetArgSize = (hasStructRetAttr) ? getSRetArgSize(DAG, Callee) : 0;

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (hasStructRetAttr)
    Ops.push_back(DAG.getTargetConstant(SRetArgSize, MVT::i32));
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
                                  RegsToPass[i].second.getValueType()));
  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
                             DAG.getIntPtrConstant(0, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 DAG.getTarget(), RVLocs, *DAG.getContext());

  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    Chain = DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

unsigned
SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
{
  const Function *CalleeFn = 0;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function *Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn->getParent();
    CalleeFn = M->getFunction(E->getSymbol());
  }

  if (!CalleeFn)
    return 0;

  assert(CalleeFn->hasStructRetAttr() &&
         "Callee does not have the StructRet attribute.");

  PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
  Type *ElementTy = Ty->getElementType();
  return getDataLayout()->getTypeAllocSize(ElementTy);
}


// Fixup floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || VA.getLocVT() != MVT::f64)
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (Outs[VA.getValNo()].IsFixed)
      continue;

    // This floating point argument should be reassigned.
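    // For example, an f64 vararg that was assigned to %d1 (offset 8) is
    // rerouted to the integer register %i1 below, while one assigned to %d8
    // (offset 64) is past the six integer argument registers and is sent to
    // the stack slot at the same offset instead.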
    CCValAssign NewVA;

    // Determine the offset into the argument array.
    unsigned Offset = 8 * (VA.getLocReg() - SP::D0);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      // Full register, just bitconvert into i64.
      NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                  IReg, MVT::i64, CCValAssign::BCvt);
    } else {
      // This needs to go to memory, we're out of integer registers.
      NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                                  Offset, VA.getLocVT(), VA.getLocInfo());
    }
    ArgLocs[i] = NewVA;
  }
}

// Lower a call for the 64-bit ABI.
SDValue
SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
                 DAG.getTarget(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());

  // Keep stack frames 16-byte aligned.
  ArgsSize = RoundUpToAlignment(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true),
                               DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      // The custom bit on an i32 argument indicates that it should be
      // passed in the high bits of the register.
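      // For example, an 'inreg {i32, i32}' argument has its first i32 shifted
      // into bits 63..32 here and the second i32 OR'ed into bits 31..0 by the
      // block below, so the pair travels in a single %o register.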
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
          SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }
      RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, getPointerTy());
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128);
    PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, PtrOff);
    MemOpChains.push_back(DAG.getStore(Chain, DL, Arg, PtrOff,
                                       MachinePointerInfo(),
                                       false, false, 0));
  }

  // Emit all stores, make sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL,
                             RegsToPass[i].first, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy());
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy());

  // Build the operands for the call instruction itself.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
                             DAG.getIntPtrConstant(0, true), InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments_64.

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
                 DAG.getTarget(), RVLocs, *DAG.getContext());
  RVInfo.AnalyzeCallResult(CLI.Ins, CC_Sparc64);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    unsigned Reg = toCallerWindow(VA.getLocReg());

    // When returning 'inreg {i32, i32}', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // previous CopyFromReg node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // Get the high bits for i32 struct elements.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
                       DAG.getConstant(32, MVT::i32));

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ISD::SETEQ:  return SPCC::ICC_E;
  case ISD::SETNE:  return SPCC::ICC_NE;
  case ISD::SETLT:  return SPCC::ICC_L;
  case ISD::SETGT:  return SPCC::ICC_G;
  case ISD::SETLE:  return SPCC::ICC_LE;
  case ISD::SETGE:  return SPCC::ICC_GE;
  case ISD::SETULT: return SPCC::ICC_CS;
  case ISD::SETULE: return SPCC::ICC_LEU;
  case ISD::SETUGT: return SPCC::ICC_GU;
  case ISD::SETUGE: return SPCC::ICC_CC;
  }
}

/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition.
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return SPCC::FCC_E;
  case ISD::SETNE:
  case ISD::SETUNE: return SPCC::FCC_NE;
  case ISD::SETLT:
  case ISD::SETOLT: return SPCC::FCC_L;
  case ISD::SETGT:
  case ISD::SETOGT: return SPCC::FCC_G;
  case ISD::SETLE:
  case ISD::SETOLE: return SPCC::FCC_LE;
  case ISD::SETGE:
  case ISD::SETOGE: return SPCC::FCC_GE;
  case ISD::SETULT: return SPCC::FCC_UL;
  case ISD::SETULE: return SPCC::FCC_ULE;
  case ISD::SETUGT: return SPCC::FCC_UG;
  case ISD::SETUGE: return SPCC::FCC_UGE;
  case ISD::SETUO:  return SPCC::FCC_U;
  case ISD::SETO:   return SPCC::FCC_O;
  case ISD::SETONE: return SPCC::FCC_LG;
  case ISD::SETUEQ: return SPCC::FCC_UE;
  }
}

SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
  : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
  Subtarget = &TM.getSubtarget<SparcSubtarget>();

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
  addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
  addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &SP::I64RegsRegClass);

  // Turn FP extload into load/fextend
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  // Sparc doesn't have i1 sign extending load
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // Custom legalize GlobalAddress nodes into LO/HI parts.
  setOperationAction(ISD::GlobalAddress, getPointerTy(), Custom);
  setOperationAction(ISD::GlobalTLSAddress, getPointerTy(), Custom);
  setOperationAction(ISD::ConstantPool, getPointerTy(), Custom);
  setOperationAction(ISD::BlockAddress, getPointerTy(), Custom);

  // Sparc doesn't have sext_inreg, replace them with shl/sra
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);

  // Sparc has no REM or DIVREM operations.
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  // Custom expand fp<->sint
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);

  // Expand fp<->uint
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);

  // Sparc has no select or setcc: expand to SELECT_CC.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);
  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);

  // Sparc doesn't have BRCOND either, it has BR_CC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SETCC, MVT::i64, Expand);
    setOperationAction(ISD::BR_CC, MVT::i64, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
  }

  // FIXME: There are instructions available for ATOMIC_FENCE
  // on SparcV8 and later.
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);

  if (!Subtarget->isV9()) {
    // SparcV8 does not have FNEGD and FABSD.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f64, Custom);
  }

  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FMA  , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FMA  , MVT::f32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ , MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::ROTL , MVT::i32, Expand);
  setOperationAction(ISD::ROTR , MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // FIXME: Sparc provides these multiplies, but we don't have them yet.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);

  setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART , MVT::Other, Custom);
  // VAARG needs to be lowered to not do unaligned accesses for doubles.
  setOperationAction(ISD::VAARG , MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY , MVT::Other, Expand);
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);

  // No debug info support yet.
  setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);

  setStackPointerRegisterToSaveRestore(SP::O6);

  if (Subtarget->isV9())
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);

  setMinFunctionAlignment(2);

  computeRegisterProperties();
}

const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case SPISD::CMPICC:     return "SPISD::CMPICC";
  case SPISD::CMPFCC:     return "SPISD::CMPFCC";
  case SPISD::BRICC:      return "SPISD::BRICC";
  case SPISD::BRXCC:      return "SPISD::BRXCC";
  case SPISD::BRFCC:      return "SPISD::BRFCC";
  case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
  case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
  case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
  case SPISD::Hi:         return "SPISD::Hi";
  case SPISD::Lo:         return "SPISD::Lo";
  case SPISD::FTOI:       return "SPISD::FTOI";
  case SPISD::ITOF:       return "SPISD::ITOF";
  case SPISD::CALL:       return "SPISD::CALL";
  case SPISD::RET_FLAG:   return "SPISD::RET_FLAG";
  case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
  case SPISD::FLUSHW:     return "SPISD::FLUSHW";
  }
}

/// computeMaskedBitsForTargetNode - Determine which bits of Op are known to be
/// zero or one and return them in the KnownZero/KnownOne bitsets. Op is
/// expected to be a target-specific node. Used by DAG combiner.
void SparcTargetLowering::computeMaskedBitsForTargetNode
                                (const SDValue Op,
                                 APInt &KnownZero,
                                 APInt &KnownOne,
                                 const SelectionDAG &DAG,
                                 unsigned Depth) const {
  APInt KnownZero2, KnownOne2;
  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);

  switch (Op.getOpcode()) {
  default: break;
  case SPISD::SELECT_ICC:
  case SPISD::SELECT_XCC:
  case SPISD::SELECT_FCC:
    DAG.ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  }
}

// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so
// set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
                             ISD::CondCode CC, unsigned &SPCC) {
  if (isa<ConstantSDNode>(RHS) &&
      cast<ConstantSDNode>(RHS)->isNullValue() &&
      CC == ISD::SETNE &&
      (((LHS.getOpcode() == SPISD::SELECT_ICC ||
         LHS.getOpcode() == SPISD::SELECT_XCC) &&
        LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
       (LHS.getOpcode() == SPISD::SELECT_FCC &&
        LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
      isa<ConstantSDNode>(LHS.getOperand(0)) &&
      isa<ConstantSDNode>(LHS.getOperand(1)) &&
      cast<ConstantSDNode>(LHS.getOperand(0))->isOne() &&
      cast<ConstantSDNode>(LHS.getOperand(1))->isNullValue()) {
    SDValue CMPCC = LHS.getOperand(3);
    SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
    LHS = CMPCC.getOperand(0);
    RHS = CMPCC.getOperand(1);
  }
}

// Convert to a target node and set target flags.
SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
                                             SelectionDAG &DAG) const {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return DAG.getTargetGlobalAddress(GA->getGlobal(),
                                      SDLoc(GA),
                                      GA->getValueType(0),
                                      GA->getOffset(), TF);

  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
    return DAG.getTargetConstantPool(CP->getConstVal(),
                                     CP->getValueType(0),
                                     CP->getAlignment(),
                                     CP->getOffset(), TF);

  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
    return DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                     Op.getValueType(),
                                     0,
                                     TF);

  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
    return DAG.getTargetExternalSymbol(ES->getSymbol(),
                                       ES->getValueType(0), TF);

  llvm_unreachable("Unhandled address SDNode");
}

// Split Op into high and low parts according to HiTF and LoTF.
// Return an ADD node combining the parts.
SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
                                          unsigned HiTF, unsigned LoTF,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
}

// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
// or ExternalSymbol SDNode.
SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = getPointerTy();

  // Handle PIC mode first.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
    // This is the pic32 code model, the GOT is known to be smaller than 4GB.
    SDValue HiLo = makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG);
    SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
    SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
                       MachinePointerInfo::getGOT(), false, false, false, 0);
  }

  // This is one of the absolute code models.
  switch(getTargetMachine().getCodeModel()) {
  default:
    llvm_unreachable("Unsupported absolute code model");
  case CodeModel::Small:
    // abs32.
    return makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG);
  case CodeModel::Medium: {
    // abs44.
    SDValue H44 = makeHiLoPair(Op, SPII::MO_H44, SPII::MO_M44, DAG);
    H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, MVT::i32));
    SDValue L44 = withTargetFlags(Op, SPII::MO_L44, DAG);
    L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
    return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
  }
  case CodeModel::Large: {
    // abs64.
    SDValue Hi = makeHiLoPair(Op, SPII::MO_HH, SPII::MO_HM, DAG);
    Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, MVT::i32));
    SDValue Lo = makeHiLoPair(Op, SPII::MO_HI, SPII::MO_LO, DAG);
    return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
  }
  }
}

SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}

static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  // Convert the fp value to integer in an FP register.
  assert(Op.getValueType() == MVT::i32);
  Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
  return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
}

static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  assert(Op.getOperand(0).getValueType() == MVT::i32);
  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
  // Convert the int value to FP in an FP register.
  return DAG.getNode(SPISD::ITOF, dl, Op.getValueType(), Tmp);
}

static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a br_cc of a "setcc", and if the setcc got lowered into
  // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);

  // Get the condition flag.
  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
    // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
    Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
  } else {
    CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
    if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
    Opc = SPISD::BRFCC;
  }
  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
                     DAG.getConstant(SPCC, MVT::i32), CompareFlag);
}

static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a select_cc of a "setcc", and if the setcc got lowered into
  // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
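  // Editorial note, not part of the original source: the SPISD::SELECT_*CC
  // node produced below is later matched to one of the SELECT_CC_* pseudo
  // instructions, which EmitInstrWithCustomInserter (near the end of this
  // file) expands into a compare-and-branch diamond, since SPARC V8 has no
  // conditional move instructions.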
  LookThroughSetCC(LHS, RHS, CC, SPCC);

  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    Opc = LHS.getValueType() == MVT::i32 ?
          SPISD::SELECT_ICC : SPISD::SELECT_XCC;
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
  } else {
    CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
    Opc = SPISD::SELECT_FCC;
    if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
  }
  return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
                     DAG.getConstant(SPCC, MVT::i32), CompareFlag);
}

static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
                            const SparcTargetLowering &TLI) {
  MachineFunction &MF = DAG.getMachineFunction();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Need frame address to find the address of VarArgsFrameIndex.
  MF.getFrameInfo()->setFrameAddressIsTaken(true);

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc DL(Op);
  SDValue Offset =
    DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(),
                DAG.getRegister(SP::I6, TLI.getPointerTy()),
                DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset()));
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
                      MachinePointerInfo(SV), false, false, 0);
}

static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  SDValue VAList = DAG.getLoad(PtrVT, DL, InChain, VAListPtr,
                               MachinePointerInfo(SV), false, false, false, 0);
  // Increment the pointer, VAList, to the next vaarg.
  SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits()/8));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr,
                         VAListPtr, MachinePointerInfo(SV), false, false, 0);
  // Load the actual argument out of the pointer VAList.
  // We can't count on greater alignment than the word size.
  return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(),
                     false, false, false,
                     std::min(PtrVT.getSizeInBits(), VT.getSizeInBits())/8);
}

static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) {
  SDValue Chain = Op.getOperand(0);  // Legalize the chain.
  SDValue Size  = Op.getOperand(1);  // Legalize the size.
  SDLoc dl(Op);

  unsigned SPReg = SP::O6;
  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);
  SDValue NewSP = DAG.getNode(ISD::SUB, dl, MVT::i32, SP, Size); // Value
  Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP);    // Output chain

  // The resultant pointer is actually 16 words from the bottom of the stack,
  // to provide a register spill area.
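  // Editorial note (an assumption based on the standard 32-bit SPARC frame
  // layout, not in the original source): the 96 bytes skipped below appear to
  // cover the 64-byte register window save area, the 4-byte hidden
  // struct-return slot, and the 6 outgoing argument words (24 bytes), i.e.
  // the minimal 92-byte frame rounded up to 8-byte alignment.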
  SDValue NewVal = DAG.getNode(ISD::ADD, dl, MVT::i32, NewSP,
                               DAG.getConstant(96, MVT::i32));
  SDValue Ops[2] = { NewVal, Chain };
  return DAG.getMergeValues(Ops, 2, dl);
}


static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
                              dl, MVT::Other, DAG.getEntryNode());
  return Chain;
}

static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned FrameReg = SP::I6;

  uint64_t depth = Op.getConstantOperandVal(0);

  SDValue FrameAddr;
  if (depth == 0)
    FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  else {
    // Flush first to make sure the windowed registers' values are on the
    // stack.
    SDValue Chain = getFLUSHW(Op, DAG);
    FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);

    for (uint64_t i = 0; i != depth; ++i) {
      SDValue Ptr = DAG.getNode(ISD::ADD,
                                dl, MVT::i32,
                                FrameAddr, DAG.getIntPtrConstant(56));
      FrameAddr = DAG.getLoad(MVT::i32, dl,
                              Chain,
                              Ptr,
                              MachinePointerInfo(), false, false, false, 0);
    }
  }
  return FrameAddr;
}

static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  uint64_t depth = Op.getConstantOperandVal(0);

  SDValue RetAddr;
  if (depth == 0) {
    unsigned RetReg = MF.addLiveIn(SP::I7,
                                   TLI.getRegClassFor(TLI.getPointerTy()));
    RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
  } else {
    // Need the frame address to find the return address of the caller.
    MFI->setFrameAddressIsTaken(true);

    // Flush first to make sure the windowed registers' values are on the
    // stack.
    SDValue Chain = getFLUSHW(Op, DAG);
    RetAddr = DAG.getCopyFromReg(Chain, dl, SP::I6, VT);

    for (uint64_t i = 0; i != depth; ++i) {
      SDValue Ptr = DAG.getNode(ISD::ADD,
                                dl, MVT::i32,
                                RetAddr,
                                DAG.getIntPtrConstant((i == depth-1)?60:56));
      RetAddr = DAG.getLoad(MVT::i32, dl,
                            Chain,
                            Ptr,
                            MachinePointerInfo(), false, false, false, 0);
    }
  }
  return RetAddr;
}

static SDValue LowerF64Op(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);

  assert(Op.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
  assert(Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS);

  // Lower fneg/fabs on f64 to fneg/fabs on f32.
  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
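  // Editorial note, not part of the original source: on big-endian SPARC the
  // sign bit of an f64 lives in the most significant word, which is the even
  // f32 subregister, so only sub_even needs the fneg/fabs; sub_odd is carried
  // through unchanged.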

  SDValue SrcReg64 = Op.getOperand(0);
  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
                                            SrcReg64);
  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
                                            SrcReg64);

  Hi32 = DAG.getNode(Op.getOpcode(), dl, MVT::f32, Hi32);

  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                dl, MVT::f64), 0);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
                                       DstReg64, Hi32);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
                                       DstReg64, Lo32);
  return DstReg64;
}

SDValue SparcTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");

  case ISD::FNEG:
  case ISD::FABS:               return LowerF64Op(Op, DAG);

  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG, *this);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::GlobalTLSAddress:
    llvm_unreachable("TLS not implemented for Sparc.");
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
  case ISD::BR_CC:              return LowerBR_CC(Op, DAG);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG, *this);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
}

MachineBasicBlock *
SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  unsigned BROpcode;
  unsigned CC;
  DebugLoc dl = MI->getDebugLoc();
  // Figure out the conditional branch opcode to use for this select_cc.
  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unknown SELECT_CC!");
  case SP::SELECT_CC_Int_ICC:
  case SP::SELECT_CC_FP_ICC:
  case SP::SELECT_CC_DFP_ICC:
    BROpcode = SP::BCOND;
    break;
  case SP::SELECT_CC_Int_FCC:
  case SP::SELECT_CC_FP_FCC:
  case SP::SELECT_CC_DFP_FCC:
    BROpcode = SP::FBCOND;
    break;
  }

  CC = (SPCC::CondCodes)MI->getOperand(3).getImm();

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  // thisMBB:
  //  ...
  //   TrueVal = ...
  //   [f]bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
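  // Editorial note, not part of the original source: the splice below moves
  // everything after the SELECT_CC pseudo (together with BB's successor list)
  // into sinkMBB, leaving thisMBB free to end with the conditional branch
  // added just below.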
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(BROpcode)).addMBB(sinkMBB).addImm(CC);

  // copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl, TII.get(SP::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(1).getReg()).addMBB(thisMBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
//                         Sparc Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
SparcTargetLowering::ConstraintType
SparcTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'r': return C_RegisterClass;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass*>
SparcTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &SP::IntRegsRegClass);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

bool
SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Sparc target isn't yet aware of offsets.
  return false;
}
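// Editorial example, not part of the original source: because
// isOffsetFoldingLegal() returns false, an address such as "@g + 20" is kept
// as %hi(g)/%lo(g) plus a separate add of the constant 20, rather than having
// the DAG combiner fold the offset into %hi(g+20)/%lo(g+20) relocations.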