//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "SparcISelLowering.h"
#include "MCTargetDesc/SparcMCExpr.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "SparcTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;


//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  assert(ArgFlags.isSRet());

  // Assign SRet argument.
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                         0,
                                         LocVT, LocInfo));
  return true;
}

static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
                                     MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get the first register.
  if (unsigned Reg = State.AllocateReg(RegList)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    // Assign the whole thing on the stack.
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(8, 4),
                                           LocVT, LocInfo));
    return true;
  }

  // Try to get the second register.
  if (unsigned Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(4, 4),
                                           LocVT, LocInfo));
  return true;
}

static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
                                         MVT &LocVT,
                                         CCValAssign::LocInfo &LocInfo,
                                         ISD::ArgFlagsTy &ArgFlags,
                                         CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };

  // Try to get the first register.
  if (unsigned Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  // Try to get the second register.
  if (unsigned Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  return true;
}
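// For illustration (a sketch of the behavior above, not normative ABI text):
// with the split-64 convention, an i64 or f64 argument travels as two i32
// halves. If %i0 and %i1 are already taken, the next 64-bit argument gets
// its high half in %i2 and its low half in %i3; once all six registers are
// exhausted, the remaining halves spill into the stack slots allocated here.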
// Allocate a full-sized argument for the 64-bit ABI.
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT == MVT::f128
          || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");

  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
  unsigned size      = (LocVT == MVT::f128) ? 16 : 8;
  unsigned alignment = (LocVT == MVT::f128) ? 16 : 8;
  unsigned Offset = State.AllocateStack(size, alignment);
  unsigned Reg = 0;

  if (LocVT == MVT::i64 && Offset < 6*8)
    // Promote integers to %i0-%i5.
    Reg = SP::I0 + Offset/8;
  else if (LocVT == MVT::f64 && Offset < 16*8)
    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
    Reg = SP::D0 + Offset/8;
  else if (LocVT == MVT::f32 && Offset < 16*8)
    // Promote floats to %f1, %f3, ...
    Reg = SP::F1 + Offset/4;
  else if (LocVT == MVT::f128 && Offset < 16*8)
    // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
    Reg = SP::Q0 + Offset/16;

  // Promote to register when possible, otherwise use the stack slot.
  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
  if (LocVT == MVT::f32)
    Offset += 4;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, 4);

  if (LocVT == MVT::f32 && Offset < 16*8) {
    // Promote floats to %f0-%f31.
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
                                     LocVT, LocInfo));
    return true;
  }

  if (LocVT == MVT::i32 && Offset < 6*8) {
    // Promote integers to %i0-%i5, using half the register.
    unsigned Reg = SP::I0 + Offset/8;
    LocVT = MVT::i64;
    LocInfo = CCValAssign::AExt;

    // Set the Custom bit if this i32 goes in the high bits of a register.
    if (Offset % 8 == 0)
      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
                                             LocVT, LocInfo));
    else
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

#include "SparcGenCallingConv.inc"
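// A worked example (illustrative only): for a 64-bit call 'f(int a, double b)',
// CC_Sparc64_Full allocates argument-array offset 0 to 'a' and offset 8 to
// 'b', so 'a' is promoted into %i0 (SP::I0 + 0/8) and 'b' into SP::D1
// (hardware %d2, i.e. SP::D0 + 8/8). The argument's position in the stack
// layout, not a separate register counter, drives register assignment.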
// The calling conventions in SparcCallingConv.td are described in terms of the
// callee's register window. This function translates registers to the
// corresponding caller window %o register.
static unsigned toCallerWindow(unsigned Reg) {
  assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7 && "Unexpected enum");
  if (Reg >= SP::I0 && Reg <= SP::I7)
    return Reg - SP::I0 + SP::O0;
  return Reg;
}
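// For example, after the callee's SAVE instruction its %i3 is the same
// physical register the caller sees as %o3, so toCallerWindow(SP::I3)
// yields SP::O3. Registers outside %i0-%i7 (e.g. float registers) are
// returned unchanged.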
SDValue
SparcTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 SDLoc DL, SelectionDAG &DAG) const {
  if (Subtarget->is64Bit())
    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}

SDValue
SparcTargetLowering::LowerReturn_32(SDValue Chain,
                                    CallingConv::ID CallConv, bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Make room for the return address offset.
  RetOps.push_back(SDValue());

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::v2i32);
      // Legalize ret v2i32 -> ret 2 x i32 (basically: do what would
      // happen by default if this wasn't a legal type).

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));

      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
      Flag = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
                               Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
  // If the function returns a struct, copy the SRetReturnReg to I0.
  if (MF.getFunction()->hasStructRetAttr()) {
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    unsigned Reg = SFI->getSRetReturnReg();
    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    auto PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
    RetAddrOffset = 12; // Call Inst + Delay Slot + Unimp
  }

  RetOps[0] = Chain;  // Update chain.
  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}
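// A note on RetAddrOffset (illustrative): a plain SPARC return jumps to
// %i7+8 because the saved address points at the call instruction itself and
// the delay slot occupies the next word. When the caller expects a struct
// return, the 32-bit ABI places an 'unimp <size>' word after the delay slot,
// so the callee must return to %i7+12 to skip over it.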
// Lower return values for the 64-bit ABI.
// Return values are passed exactly the same way as function arguments.
SDValue
SparcTargetLowering::LowerReturn_64(SDValue Chain,
                                    CallingConv::ID CallConv, bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    SDLoc DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // The second operand on the return instruction is the return address offset.
  // The return address is always %i7+8 with the 64-bit ABI.
  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // The custom bit on an i32 return value indicates that it should be passed
    // in the high bits of the register.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
                           DAG.getConstant(32, DL, MVT::i32));

      // The next value may go in the low bits of the same register.
      // Handle both at once.
      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
        // Skip the next value, it's already done.
        ++i;
      }
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}

SDValue SparcTargetLowering::
LowerFormalArguments(SDValue Chain,
                     CallingConv::ID CallConv,
                     bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL,
                     SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
                                   DL, DAG, InVals);
  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
                                 DL, DAG, InVals);
}

/// LowerFormalArguments_32 - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values. TODO: we should
/// pass FP values in FP registers for fastcc functions.
SDValue SparcTargetLowering::
LowerFormalArguments_32(SDValue Chain,
                        CallingConv::ID CallConv,
                        bool isVarArg,
                        const SmallVectorImpl<ISD::InputArg> &Ins,
                        SDLoc dl,
                        SelectionDAG &DAG,
                        SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

  const unsigned StackOffset = 92;
  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();

  unsigned InIdx = 0;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
    CCValAssign &VA = ArgLocs[i];

    if (Ins[InIdx].Flags.isSRet()) {
      if (InIdx != 0)
        report_fatal_error("sparc only supports sret on the first parameter");
      // Get SRet from [%fp+64].
      int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, 64, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
      SDValue Arg = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
                                MachinePointerInfo(),
                                false, false, false, 0);
      InVals.push_back(Arg);
      continue;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom()) {
        assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

        unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
        SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);

        assert(i+1 < e);
        CCValAssign &NextVA = ArgLocs[++i];

        SDValue LoVal;
        if (NextVA.isMemLoc()) {
          int FrameIdx = MF.getFrameInfo()->
            CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(), true);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
                              MachinePointerInfo(),
                              false, false, false, 0);
        } else {
          unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
                                        &SP::IntRegsRegClass);
          LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
        }

        if (IsLittleEndian)
          std::swap(LoVal, HiVal);

        SDValue WholeValue =
          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
        WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
        InVals.push_back(WholeValue);
        continue;
      }
      unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
      SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      if (VA.getLocVT() == MVT::f32)
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
      else if (VA.getLocVT() != MVT::i32) {
        Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
                          DAG.getValueType(VA.getLocVT()));
        Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
      }
      InVals.push_back(Arg);
      continue;
    }

    assert(VA.isMemLoc());

    unsigned Offset = VA.getLocMemOffset()+StackOffset;
    auto PtrVT = getPointerTy(DAG.getDataLayout());

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
      // If it is double-word aligned, just load.
      if (Offset % 8 == 0) {
        int FI = MF.getFrameInfo()->CreateFixedObject(8,
                                                      Offset,
                                                      true);
        SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
        SDValue Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
                                   MachinePointerInfo(),
                                   false, false, false, 0);
        InVals.push_back(Load);
        continue;
      }

      int FI = MF.getFrameInfo()->CreateFixedObject(4,
                                                    Offset,
                                                    true);
      SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
      SDValue HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
                                  MachinePointerInfo(),
                                  false, false, false, 0);
      int FI2 = MF.getFrameInfo()->CreateFixedObject(4,
                                                     Offset+4,
                                                     true);
      SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);

      SDValue LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr2,
                                  MachinePointerInfo(),
                                  false, false, false, 0);

      if (IsLittleEndian)
        std::swap(LoVal, HiVal);

      SDValue WholeValue =
        DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
      WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
      InVals.push_back(WholeValue);
      continue;
    }

    int FI = MF.getFrameInfo()->CreateFixedObject(4,
                                                  Offset,
                                                  true);
    SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
    SDValue Load;
    if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
      Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
                         MachinePointerInfo(),
                         false, false, false, 0);
    } else if (VA.getValVT() == MVT::f128) {
      report_fatal_error("SPARCv8 does not handle f128 in calls; "
                         "pass indirectly");
    } else {
      // We shouldn't see any other value types here.
      llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
    }
    InVals.push_back(Load);
  }

  if (MF.getFunction()->hasStructRetAttr()) {
    // Copy the SRet Argument to SRetReturnReg.
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    unsigned Reg = SFI->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
      SFI->setSRetReturnReg(Reg);
    }
    SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (isVarArg) {
    static const MCPhysReg ArgRegs[] = {
      SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
    };
    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
    const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
    unsigned ArgOffset = CCInfo.getNextStackOffset();
    if (NumAllocated == 6)
      ArgOffset += StackOffset;
    else {
      assert(!ArgOffset);
      ArgOffset = 68+4*NumAllocated;
    }

    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);

    std::vector<SDValue> OutChains;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
                                                          true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr,
                                       MachinePointerInfo(),
                                       false, false, 0));
      ArgOffset += 4;
    }

    if (!OutChains.empty()) {
      OutChains.push_back(Chain);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
    }
  }

  return Chain;
}
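// The incoming-argument frame layout assumed by the 32-bit lowering above,
// for reference (derived from the constants in the code, not a separate spec
// quotation): the hidden sret pointer lives at [%fp+64], the six register
// argument home slots span [%fp+68] through [%fp+88], and overflow stack
// arguments start at [%fp+92]. That is why StackOffset is 92 and a varargs
// function that consumed N register arguments starts spilling at 68+4*N.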
// Lower formal arguments for the 64-bit ABI.
SDValue SparcTargetLowering::
LowerFormalArguments_64(SDValue Chain,
                        CallingConv::ID CallConv,
                        bool IsVarArg,
                        const SmallVectorImpl<ISD::InputArg> &Ins,
                        SDLoc DL,
                        SelectionDAG &DAG,
                        SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze arguments according to CC_Sparc64.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);

  // The argument array begins at %fp+BIAS+128, after the register save area.
  const unsigned ArgArea = 128;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (VA.isRegLoc()) {
      // This argument is passed in a register.
      // All integer register arguments are promoted by the caller to i64.

      // Create a virtual register for the promoted live-in value.
      unsigned VReg = MF.addLiveIn(VA.getLocReg(),
                                   getRegClassFor(VA.getLocVT()));
      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());

      // Get the high bits for i32 struct elements.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom())
        Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
                          DAG.getConstant(32, DL, MVT::i32));

      // The caller promoted the argument, so insert an Assert?ext SDNode so we
      // won't promote the value again in this function.
      switch (VA.getLocInfo()) {
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      default:
        break;
      }

      // Truncate the register down to the argument type.
      if (VA.isExtInLoc())
        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

      InVals.push_back(Arg);
      continue;
    }

    // The registers are exhausted. This argument was passed on the stack.
    assert(VA.isMemLoc());
    // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
    // beginning of the arguments area at %fp+BIAS+128.
    unsigned Offset = VA.getLocMemOffset() + ArgArea;
    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
    // Adjust the offset for extended arguments; SPARC is big-endian.
    // The caller will have written the full slot with extended bytes, but we
    // prefer our own extending loads.
    if (VA.isExtInLoc())
      Offset += 8 - ValSize;
    int FI = MF.getFrameInfo()->CreateFixedObject(ValSize, Offset, true);
    InVals.push_back(DAG.getLoad(
        VA.getValVT(), DL, Chain,
        DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
        MachinePointerInfo::getFixedStack(MF, FI), false, false, false, 0));
  }

  if (!IsVarArg)
    return Chain;

  // This function takes variable arguments, some of which may have been passed
  // in registers %i0-%i5. Variable floating point arguments are never passed
  // in floating point registers. They go on %i0-%i5 or on the stack like
  // integer arguments.
  //
  // The va_start intrinsic needs to know the offset to the first variable
  // argument.
  unsigned ArgOffset = CCInfo.getNextStackOffset();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  // Skip the 128 bytes of register save area.
  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
                                  Subtarget->getStackPointerBias());

  // Save the variable arguments that were passed in registers.
  // The caller is required to reserve stack space for 6 arguments regardless
  // of how many arguments were actually passed.
  SmallVector<SDValue, 8> OutChains;
  for (; ArgOffset < 6*8; ArgOffset += 8) {
    unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
    SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
    int FI = MF.getFrameInfo()->CreateFixedObject(8, ArgOffset + ArgArea, true);
    auto PtrVT = getPointerTy(MF.getDataLayout());
    OutChains.push_back(DAG.getStore(
        Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
        MachinePointerInfo::getFixedStack(MF, FI), false, false, 0));
  }

  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);

  return Chain;
}
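// A sketch of the 64-bit frame this code assumes: %sp and %fp are biased by
// BIAS (2047 on SPARC v9), the 128-byte register save area for %l0-%l7 and
// %i0-%i7 sits at [%sp+BIAS], and the argument array starts at [%fp+BIAS+128]
// in the callee's frame. That is why ArgArea is 128 and why the vararg frame
// offset adds both ArgArea and the stack pointer bias.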
SDValue
SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerCall_64(CLI, InVals);
  return LowerCall_32(CLI, InVals);
}

static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
                                ImmutableCallSite *CS) {
  if (CS)
    return CS->hasFnAttr(Attribute::ReturnsTwice);

  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function *Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn->getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
  }

  if (!CalleeFn)
    return false;
  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
}

// Lower a call for the 32-bit ABI.
SDValue
SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // Sparc target does not yet support tail call optimization.
  isTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getNextStackOffset();

  // Keep stack frames 8-byte aligned.
  ArgsSize = (ArgsSize+7) & ~7;

  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    unsigned Align = Flags.getByValAlign();

    int FI = MFI->CreateStackObject(Size, Align, false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);

    Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
                          false,        // isVolatile,
                          (Size <= 32), // AlwaysInline if size <= 32,
                          false,        // isTailCall
                          MachinePointerInfo(), MachinePointerInfo());
    ByValArgs.push_back(FIPtr);
  }
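  // For illustration: an IR call such as 'call void @f(%struct.S* byval %p)'
  // reaches this point with isByVal set, so the struct is first copied into a
  // fresh local stack object and the copy's address is what actually gets
  // passed. Copies of 32 bytes or less are emitted as inline loads/stores
  // rather than a memcpy libcall, per the AlwaysInline flag above.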
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
                               dl);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const unsigned StackOffset = 92;
  bool hasStructRetAttr = false;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];

    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Use the local copy if it is a byval arg.
    if (Flags.isByVal())
      Arg = ByValArgs[byvalArgIdx++];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (Flags.isSRet()) {
      assert(VA.needsCustom());
      // Store the SRet argument at %sp+64.
      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
      SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
      MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                         MachinePointerInfo(),
                                         false, false, 0));
      hasStructRetAttr = true;
      continue;
    }

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

      if (VA.isMemLoc()) {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // If it is double-word aligned, just store.
        if (Offset % 8 == 0) {
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                             MachinePointerInfo(),
                                             false, false, 0));
          continue;
        }
      }

      if (VA.getLocVT() == MVT::f64) {
        // Move the float value from the float registers into the
        // integer registers.

        // TODO: The f64 -> v2i32 conversion is super-inefficient for
        // constants: it sticks them in the constant pool, then loads
        // to a fp register, then stores to temp memory, then loads to
        // integer registers.
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
      }

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  Arg,
                                  DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  Arg,
                                  DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));

      if (VA.isRegLoc()) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
        assert(i+1 != e);
        CCValAssign &NextVA = ArgLocs[++i];
        if (NextVA.isRegLoc()) {
          RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
        } else {
          // Store the second part on the stack.
          unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(DAG.getStore(Chain, dl, Part1, PtrOff,
                                             MachinePointerInfo(),
                                             false, false, 0));
        }
      } else {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // Store the first part.
        SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
        SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getStore(Chain, dl, Part0, PtrOff,
                                           MachinePointerInfo(),
                                           false, false, 0));
        // Store the second part.
        PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(DAG.getStore(Chain, dl, Part1, PtrOff,
                                           MachinePointerInfo(),
                                           false, false, 0));
      }
      continue;
    }

    // Arguments that are passed in registers must be added to the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      if (VA.getLocVT() != MVT::f32) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
        continue;
      }
      Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
                                           dl);
    PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
    MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                       MachinePointerInfo(),
                                       false, false, 0));
  }

  // Emit all stores, making sure they occur before any copies into physregs.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    unsigned Reg = toCallerWindow(RegsToPass[i].first);
    Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  unsigned SRetArgSize = (hasStructRetAttr) ? getSRetArgSize(DAG, Callee) : 0;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_)
                 ? SparcMCExpr::VK_Sparc_WPLT30 : 0);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (hasStructRetAttr)
    Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice)
           ? TRI->getRTCallPreservedMask(CallConv)
           : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    Chain = DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}
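// Background for the SRetArgSize operand above (descriptive, not a spec
// quotation): the 32-bit SPARC struct-return convention has the caller store
// a hidden result pointer at %sp+64 and follow the call's delay slot with an
// 'unimp <size>' word encoding the expected struct size, which the callee
// may check. getSRetArgSize() below recovers that size from the callee's
// declared sret parameter type.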
// This function returns true if CalleeName is an ABI function that returns
// a long double (fp128).
static bool isFP128ABICall(const char *CalleeName)
{
  static const char *const ABICalls[] =
    { "_Q_add", "_Q_sub", "_Q_mul", "_Q_div",
      "_Q_sqrt", "_Q_neg",
      "_Q_itoq", "_Q_stoq", "_Q_dtoq", "_Q_utoq",
      "_Q_lltoq", "_Q_ulltoq",
      nullptr
    };
  for (const char * const *I = ABICalls; *I != nullptr; ++I)
    if (strcmp(CalleeName, *I) == 0)
      return true;
  return false;
}

unsigned
SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
{
  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function *Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn->getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
    if (!CalleeFn && isFP128ABICall(CalleeName))
      return 16; // Return sizeof(fp128)
  }

  if (!CalleeFn)
    return 0;

  // It would be nice to check for the sret attribute on CalleeFn here,
  // but since it is not part of the function type, any check will misfire.

  PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
  Type *ElementTy = Ty->getElementType();
  return DAG.getDataLayout().getTypeAllocSize(ElementTy);
}


// Fixup floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    MVT ValTy = VA.getLocVT();
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (Outs[VA.getValNo()].IsFixed)
      continue;

    // This floating point argument should be reassigned.
    CCValAssign NewVA;

    // Determine the offset into the argument array.
    unsigned firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
    unsigned argSize  = (ValTy == MVT::f64) ? 8 : 16;
    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      if (ValTy == MVT::f64)
        // Full register, just bitconvert into i64.
        NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                    IReg, MVT::i64, CCValAssign::BCvt);
      else {
        assert(ValTy == MVT::f128 && "Unexpected type!");
        // Full register, just bitconvert into i128 -- we will lower this into
        // two i64s in LowerCall_64.
        NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
                                          IReg, MVT::i128, CCValAssign::BCvt);
      }
    } else {
      // This needs to go to memory, we're out of integer registers.
      NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                                  Offset, VA.getLocVT(), VA.getLocInfo());
    }
    ArgLocs[i] = NewVA;
  }
}
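// A worked example (illustrative): in 'printf("%f\n", x)' with a double x,
// the format string is a fixed argument and keeps its integer register,
// while x is variable. AnalyzeCallOperands initially assigns x a float
// register (SP::D1, i.e. argument-array offset 8), and the fixup above
// reassigns it to the i64 location SP::I1, which LowerCall_64 then maps to
// %o1 in the caller's window.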
// Lower a call for the 64-bit ABI.
SDValue
SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Sparc target does not yet support tail call optimization.
  CLI.IsTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // or not.
  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());

  // Keep stack frames 16-byte aligned.
  ArgsSize = RoundUpToAlignment(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
                               DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
      // SPARC does not support i128 natively. Lower it into two i64s; see
      // below.
      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
          || VA.getLocVT() != MVT::i128)
        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f128
          && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer register pair Reg and Reg+1.
        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
        SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
        HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
        LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);

        // Store to %sp+BIAS+128+Offset.
        SDValue Store = DAG.getStore(Chain, DL, Arg, HiPtrOff,
                                     MachinePointerInfo(),
                                     false, false, 0);
        // Load into Reg and Reg+1.
        SDValue Hi64 = DAG.getLoad(MVT::i64, DL, Store, HiPtrOff,
                                   MachinePointerInfo(),
                                   false, false, false, 0);
        SDValue Lo64 = DAG.getLoad(MVT::i64, DL, Store, LoPtrOff,
                                   MachinePointerInfo(),
                                   false, false, false, 0);
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
                                            Hi64));
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
                                            Lo64));
        continue;
      }

      // The custom bit on an i32 return value indicates that it should be
      // passed in the high bits of the register.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, DL, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
          SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }
      RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128, DL);
    PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
    MemOpChains.push_back(DAG.getStore(Chain, DL, Arg, PtrOff,
                                       MachinePointerInfo(),
                                       false, false, 0));
  }

  // Emit all stores, making sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL,
                             RegsToPass[i].first, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
  unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_)
                 ? SparcMCExpr::VK_Sparc_WPLT30 : 0);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);

  // Build the operands for the call instruction itself.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
                         : TRI->getCallPreservedMask(DAG.getMachineFunction(),
                                                     CLI.CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
                             DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments_64.

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Set the inreg flag manually for codegen-generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && CLI.CS == nullptr)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    unsigned Reg = toCallerWindow(VA.getLocReg());

    // When returning 'inreg {i32, i32}', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // previous CopyFromReg node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // Get the high bits for i32 struct elements.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
                       DAG.getConstant(32, DL, MVT::i32));

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ISD::SETEQ:  return SPCC::ICC_E;
  case ISD::SETNE:  return SPCC::ICC_NE;
  case ISD::SETLT:  return SPCC::ICC_L;
  case ISD::SETGT:  return SPCC::ICC_G;
  case ISD::SETLE:  return SPCC::ICC_LE;
  case ISD::SETGE:  return SPCC::ICC_GE;
  case ISD::SETULT: return SPCC::ICC_CS;
  case ISD::SETULE: return SPCC::ICC_LEU;
  case ISD::SETUGT: return SPCC::ICC_GU;
  case ISD::SETUGE: return SPCC::ICC_CC;
  }
}

/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition.
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return SPCC::FCC_E;
  case ISD::SETNE:
  case ISD::SETUNE: return SPCC::FCC_NE;
  case ISD::SETLT:
  case ISD::SETOLT: return SPCC::FCC_L;
  case ISD::SETGT:
  case ISD::SETOGT: return SPCC::FCC_G;
  case ISD::SETLE:
  case ISD::SETOLE: return SPCC::FCC_LE;
  case ISD::SETGE:
  case ISD::SETOGE: return SPCC::FCC_GE;
  case ISD::SETULT: return SPCC::FCC_UL;
  case ISD::SETULE: return SPCC::FCC_ULE;
  case ISD::SETUGT: return SPCC::FCC_UG;
  case ISD::SETUGE: return SPCC::FCC_UGE;
  case ISD::SETUO:  return SPCC::FCC_U;
  case ISD::SETO:   return SPCC::FCC_O;
  case ISD::SETONE: return SPCC::FCC_LG;
  case ISD::SETUEQ: return SPCC::FCC_UE;
  }
}
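// Note on the unsigned integer mappings above: after a 'subcc' compare,
// unsigned less-than is exactly "carry set", so SETULT maps to ICC_CS and
// SETUGE to ICC_CC (carry clear). The ordered/unordered FCC split exists
// because the FPU compare distinguishes the unordered (NaN) relation.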
SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
                                         const SparcSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());

  // Instructions which use registers as conditionals examine all the
  // bits (as does the pseudo SELECT_CC expansion). I don't think it
  // matters much whether it's ZeroOrOneBooleanContent, or
  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
  // former.
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
  addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
  addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
  addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
  if (Subtarget->is64Bit()) {
    addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
  } else {
    // On 32-bit sparc, we define a double-register 32-bit register
    // class, as well. This is modeled in LLVM as a 2-vector of i32.
    addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);

    // ...but almost all operations must be expanded, so set that as
    // the default.
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      setOperationAction(Op, MVT::v2i32, Expand);
    }
    // Truncating/extending stores/loads are also not supported.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);

      setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);

      setTruncStoreAction(VT, MVT::v2i32, Expand);
      setTruncStoreAction(MVT::v2i32, VT, Expand);
    }
    // However, load and store *are* legal.
    setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
    setOperationAction(ISD::STORE, MVT::v2i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal);

    // And we need to promote i64 loads/stores into vector load/store.
    setOperationAction(ISD::LOAD, MVT::i64, Custom);
    setOperationAction(ISD::STORE, MVT::i64, Custom);

    // Sadly, this doesn't work:
    // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
    // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
  }

  // Turn FP extload into load/fextend.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
  }

  // Sparc doesn't have i1 sign extending load.
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // Custom legalize GlobalAddress nodes into LO/HI parts.
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);

  // Sparc doesn't have sext_inreg, replace them with shl/sra.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);

  // Sparc has no REM or DIVREM operations.
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  // ... nor does SparcV9.
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UREM, MVT::i64, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  }

  // Custom expand fp<->sint
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Custom Expand fp<->uint
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);

  // Sparc has no select or setcc: expand to SELECT_CC.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::f128, Expand);

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Expand);

  // Sparc doesn't have BRCOND either, it has BR_CC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ADDC, MVT::i64, Custom);
    setOperationAction(ISD::ADDE, MVT::i64, Custom);
    setOperationAction(ISD::SUBC, MVT::i64, Custom);
    setOperationAction(ISD::SUBE, MVT::i64, Custom);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SETCC, MVT::i64, Expand);
    setOperationAction(ISD::BR_CC, MVT::i64, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);

    setOperationAction(ISD::CTPOP, MVT::i64,
                       Subtarget->usePopc() ? Legal : Expand);
    setOperationAction(ISD::CTTZ , MVT::i64, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
    setOperationAction(ISD::CTLZ , MVT::i64, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
    setOperationAction(ISD::ROTL , MVT::i64, Expand);
    setOperationAction(ISD::ROTR , MVT::i64, Expand);
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  }

  // ATOMICs.
  // FIXME: We insert fences for each atomic operation and generate
  // sub-optimal code for PSO/TSO. Also, implement other atomicrmw operations.
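  // With setInsertFencesForAtomic(true) below, fences are emitted around
  // every atomic operation; under TSO many of those barriers are stronger
  // than strictly required, which is what the FIXME above refers to.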
1589 1590 setInsertFencesForAtomic(true); 1591 1592 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal); 1593 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, 1594 (Subtarget->isV9() ? Legal: Expand)); 1595 1596 1597 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal); 1598 1599 // Custom Lower Atomic LOAD/STORE 1600 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); 1601 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); 1602 1603 if (Subtarget->is64Bit()) { 1604 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal); 1605 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal); 1606 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom); 1607 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom); 1608 } 1609 1610 if (!Subtarget->isV9()) { 1611 // SparcV8 does not have FNEGD and FABSD. 1612 setOperationAction(ISD::FNEG, MVT::f64, Custom); 1613 setOperationAction(ISD::FABS, MVT::f64, Custom); 1614 } 1615 1616 setOperationAction(ISD::FSIN , MVT::f128, Expand); 1617 setOperationAction(ISD::FCOS , MVT::f128, Expand); 1618 setOperationAction(ISD::FSINCOS, MVT::f128, Expand); 1619 setOperationAction(ISD::FREM , MVT::f128, Expand); 1620 setOperationAction(ISD::FMA , MVT::f128, Expand); 1621 setOperationAction(ISD::FSIN , MVT::f64, Expand); 1622 setOperationAction(ISD::FCOS , MVT::f64, Expand); 1623 setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 1624 setOperationAction(ISD::FREM , MVT::f64, Expand); 1625 setOperationAction(ISD::FMA , MVT::f64, Expand); 1626 setOperationAction(ISD::FSIN , MVT::f32, Expand); 1627 setOperationAction(ISD::FCOS , MVT::f32, Expand); 1628 setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 1629 setOperationAction(ISD::FREM , MVT::f32, Expand); 1630 setOperationAction(ISD::FMA , MVT::f32, Expand); 1631 setOperationAction(ISD::CTTZ , MVT::i32, Expand); 1632 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); 1633 setOperationAction(ISD::CTLZ , MVT::i32, Expand); 1634 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); 1635 setOperationAction(ISD::ROTL , MVT::i32, Expand); 1636 setOperationAction(ISD::ROTR , MVT::i32, Expand); 1637 setOperationAction(ISD::BSWAP, MVT::i32, Expand); 1638 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); 1639 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 1640 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 1641 setOperationAction(ISD::FPOW , MVT::f128, Expand); 1642 setOperationAction(ISD::FPOW , MVT::f64, Expand); 1643 setOperationAction(ISD::FPOW , MVT::f32, Expand); 1644 1645 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); 1646 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); 1647 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); 1648 1649 // FIXME: Sparc provides these multiplies, but we don't have them yet. 1650 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 1651 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 1652 1653 if (Subtarget->is64Bit()) { 1654 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); 1655 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); 1656 setOperationAction(ISD::MULHU, MVT::i64, Expand); 1657 setOperationAction(ISD::MULHS, MVT::i64, Expand); 1658 1659 setOperationAction(ISD::UMULO, MVT::i64, Custom); 1660 setOperationAction(ISD::SMULO, MVT::i64, Custom); 1661 1662 setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand); 1663 setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand); 1664 setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand); 1665 } 1666 1667 // VASTART needs to be custom lowered to use the VarArgsFrameIndex. 
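  // (The custom lowering stores the address of the first variadic slot,
  // i.e. %fp plus the vararg frame offset, into the va_list; see
  // LowerVASTART below.)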
1668 setOperationAction(ISD::VASTART , MVT::Other, Custom); 1669 // VAARG needs to be lowered to not do unaligned accesses for doubles. 1670 setOperationAction(ISD::VAARG , MVT::Other, Custom); 1671 1672 setOperationAction(ISD::TRAP , MVT::Other, Legal); 1673 1674 // Use the default implementation. 1675 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 1676 setOperationAction(ISD::VAEND , MVT::Other, Expand); 1677 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); 1678 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand); 1679 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); 1680 1681 setStackPointerRegisterToSaveRestore(SP::O6); 1682 1683 setOperationAction(ISD::CTPOP, MVT::i32, 1684 Subtarget->usePopc() ? Legal : Expand); 1685 1686 if (Subtarget->isV9() && Subtarget->hasHardQuad()) { 1687 setOperationAction(ISD::LOAD, MVT::f128, Legal); 1688 setOperationAction(ISD::STORE, MVT::f128, Legal); 1689 } else { 1690 setOperationAction(ISD::LOAD, MVT::f128, Custom); 1691 setOperationAction(ISD::STORE, MVT::f128, Custom); 1692 } 1693 1694 if (Subtarget->hasHardQuad()) { 1695 setOperationAction(ISD::FADD, MVT::f128, Legal); 1696 setOperationAction(ISD::FSUB, MVT::f128, Legal); 1697 setOperationAction(ISD::FMUL, MVT::f128, Legal); 1698 setOperationAction(ISD::FDIV, MVT::f128, Legal); 1699 setOperationAction(ISD::FSQRT, MVT::f128, Legal); 1700 setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal); 1701 setOperationAction(ISD::FP_ROUND, MVT::f64, Legal); 1702 if (Subtarget->isV9()) { 1703 setOperationAction(ISD::FNEG, MVT::f128, Legal); 1704 setOperationAction(ISD::FABS, MVT::f128, Legal); 1705 } else { 1706 setOperationAction(ISD::FNEG, MVT::f128, Custom); 1707 setOperationAction(ISD::FABS, MVT::f128, Custom); 1708 } 1709 1710 if (!Subtarget->is64Bit()) { 1711 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll"); 1712 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull"); 1713 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq"); 1714 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq"); 1715 } 1716 1717 } else { 1718 // Custom legalize f128 operations. 1719 1720 setOperationAction(ISD::FADD, MVT::f128, Custom); 1721 setOperationAction(ISD::FSUB, MVT::f128, Custom); 1722 setOperationAction(ISD::FMUL, MVT::f128, Custom); 1723 setOperationAction(ISD::FDIV, MVT::f128, Custom); 1724 setOperationAction(ISD::FSQRT, MVT::f128, Custom); 1725 setOperationAction(ISD::FNEG, MVT::f128, Custom); 1726 setOperationAction(ISD::FABS, MVT::f128, Custom); 1727 1728 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom); 1729 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom); 1730 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); 1731 1732 // Setup Runtime library names. 
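    // The quad-precision soft-float routines follow the SPARC ABI naming:
    // _Qp_* on 64-bit V9 and the older _Q_* names on 32-bit V8. In either
    // case the f128 operands are passed indirectly through stack temporaries;
    // see LowerF128Op and LowerF128_LibCallArg below.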
1733 if (Subtarget->is64Bit()) { 1734 setLibcallName(RTLIB::ADD_F128, "_Qp_add"); 1735 setLibcallName(RTLIB::SUB_F128, "_Qp_sub"); 1736 setLibcallName(RTLIB::MUL_F128, "_Qp_mul"); 1737 setLibcallName(RTLIB::DIV_F128, "_Qp_div"); 1738 setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt"); 1739 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi"); 1740 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui"); 1741 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq"); 1742 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq"); 1743 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox"); 1744 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux"); 1745 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq"); 1746 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq"); 1747 setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq"); 1748 setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq"); 1749 setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos"); 1750 setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod"); 1751 } else { 1752 setLibcallName(RTLIB::ADD_F128, "_Q_add"); 1753 setLibcallName(RTLIB::SUB_F128, "_Q_sub"); 1754 setLibcallName(RTLIB::MUL_F128, "_Q_mul"); 1755 setLibcallName(RTLIB::DIV_F128, "_Q_div"); 1756 setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt"); 1757 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi"); 1758 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou"); 1759 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq"); 1760 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq"); 1761 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll"); 1762 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull"); 1763 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq"); 1764 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq"); 1765 setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq"); 1766 setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq"); 1767 setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos"); 1768 setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod"); 1769 } 1770 } 1771 1772 setMinFunctionAlignment(2); 1773 1774 computeRegisterProperties(Subtarget->getRegisterInfo()); 1775} 1776 1777const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const { 1778 switch ((SPISD::NodeType)Opcode) { 1779 case SPISD::FIRST_NUMBER: break; 1780 case SPISD::CMPICC: return "SPISD::CMPICC"; 1781 case SPISD::CMPFCC: return "SPISD::CMPFCC"; 1782 case SPISD::BRICC: return "SPISD::BRICC"; 1783 case SPISD::BRXCC: return "SPISD::BRXCC"; 1784 case SPISD::BRFCC: return "SPISD::BRFCC"; 1785 case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC"; 1786 case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC"; 1787 case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC"; 1788 case SPISD::Hi: return "SPISD::Hi"; 1789 case SPISD::Lo: return "SPISD::Lo"; 1790 case SPISD::FTOI: return "SPISD::FTOI"; 1791 case SPISD::ITOF: return "SPISD::ITOF"; 1792 case SPISD::FTOX: return "SPISD::FTOX"; 1793 case SPISD::XTOF: return "SPISD::XTOF"; 1794 case SPISD::CALL: return "SPISD::CALL"; 1795 case SPISD::RET_FLAG: return "SPISD::RET_FLAG"; 1796 case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG"; 1797 case SPISD::FLUSHW: return "SPISD::FLUSHW"; 1798 case SPISD::TLS_ADD: return "SPISD::TLS_ADD"; 1799 case SPISD::TLS_LD: return "SPISD::TLS_LD"; 1800 case SPISD::TLS_CALL: return "SPISD::TLS_CALL"; 1801 } 1802 return nullptr; 1803} 1804 1805EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &, 1806 EVT VT) const { 1807 if (!VT.isVector()) 1808 return MVT::i32; 1809 return VT.changeVectorElementTypeToInteger(); 1810} 1811 1812/// isMaskedValueZeroForTargetNode - 
/// Determine which bits of Op are
/// known to be either zero or one. Op is expected to be a target-specific
/// node. Used by the DAG combiner.
void SparcTargetLowering::computeKnownBitsForTargetNode
                                (const SDValue Op,
                                 APInt &KnownZero,
                                 APInt &KnownOne,
                                 const SelectionDAG &DAG,
                                 unsigned Depth) const {
  APInt KnownZero2, KnownOne2;
  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);

  switch (Op.getOpcode()) {
  default: break;
  case SPISD::SELECT_ICC:
  case SPISD::SELECT_XCC:
  case SPISD::SELECT_FCC:
    DAG.computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    DAG.computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  }
}

// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so,
// set LHS/RHS to the LHS/RHS of the setcc and SPCC to the condition.
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
                             ISD::CondCode CC, unsigned &SPCC) {
  if (isNullConstant(RHS) &&
      CC == ISD::SETNE &&
      (((LHS.getOpcode() == SPISD::SELECT_ICC ||
         LHS.getOpcode() == SPISD::SELECT_XCC) &&
        LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
       (LHS.getOpcode() == SPISD::SELECT_FCC &&
        LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
      isOneConstant(LHS.getOperand(0)) &&
      isNullConstant(LHS.getOperand(1))) {
    SDValue CMPCC = LHS.getOperand(3);
    SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
    LHS = CMPCC.getOperand(0);
    RHS = CMPCC.getOperand(1);
  }
}

// Convert to a target node and set target flags.
SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
                                             SelectionDAG &DAG) const {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return DAG.getTargetGlobalAddress(GA->getGlobal(),
                                      SDLoc(GA),
                                      GA->getValueType(0),
                                      GA->getOffset(), TF);

  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
    return DAG.getTargetConstantPool(CP->getConstVal(),
                                     CP->getValueType(0),
                                     CP->getAlignment(),
                                     CP->getOffset(), TF);

  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
    return DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                     Op.getValueType(),
                                     0,
                                     TF);

  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
    return DAG.getTargetExternalSymbol(ES->getSymbol(),
                                       ES->getValueType(0), TF);

  llvm_unreachable("Unhandled address SDNode");
}

// Split Op into high and low parts according to HiTF and LoTF.
// Return an ADD node combining the parts.
SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
                                          unsigned HiTF, unsigned LoTF,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
  SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
}

// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
// or ExternalSymbol SDNode.
SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = getPointerTy(DAG.getDataLayout());

  // Handle PIC mode first.
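  // In PIC mode the address is loaded from the GOT: the GOT22/GOT10
  // relocation pair builds the offset of the symbol's GOT slot, the offset
  // is added to the global base register, and the address itself is then
  // loaded from that GOT entry.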
1906 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 1907 // This is the pic32 code model, the GOT is known to be smaller than 4GB. 1908 SDValue HiLo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22, 1909 SparcMCExpr::VK_Sparc_GOT10, DAG); 1910 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT); 1911 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo); 1912 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this 1913 // function has calls. 1914 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 1915 MFI->setHasCalls(true); 1916 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr, 1917 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 1918 false, false, false, 0); 1919 } 1920 1921 // This is one of the absolute code models. 1922 switch(getTargetMachine().getCodeModel()) { 1923 default: 1924 llvm_unreachable("Unsupported absolute code model"); 1925 case CodeModel::Small: 1926 // abs32. 1927 return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI, 1928 SparcMCExpr::VK_Sparc_LO, DAG); 1929 case CodeModel::Medium: { 1930 // abs44. 1931 SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44, 1932 SparcMCExpr::VK_Sparc_M44, DAG); 1933 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32)); 1934 SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG); 1935 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44); 1936 return DAG.getNode(ISD::ADD, DL, VT, H44, L44); 1937 } 1938 case CodeModel::Large: { 1939 // abs64. 1940 SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH, 1941 SparcMCExpr::VK_Sparc_HM, DAG); 1942 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32)); 1943 SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI, 1944 SparcMCExpr::VK_Sparc_LO, DAG); 1945 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo); 1946 } 1947 } 1948} 1949 1950SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op, 1951 SelectionDAG &DAG) const { 1952 return makeAddress(Op, DAG); 1953} 1954 1955SDValue SparcTargetLowering::LowerConstantPool(SDValue Op, 1956 SelectionDAG &DAG) const { 1957 return makeAddress(Op, DAG); 1958} 1959 1960SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op, 1961 SelectionDAG &DAG) const { 1962 return makeAddress(Op, DAG); 1963} 1964 1965SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op, 1966 SelectionDAG &DAG) const { 1967 1968 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 1969 if (DAG.getTarget().Options.EmulatedTLS) 1970 return LowerToTLSEmulatedModel(GA, DAG); 1971 1972 SDLoc DL(GA); 1973 const GlobalValue *GV = GA->getGlobal(); 1974 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 1975 1976 TLSModel::Model model = getTargetMachine().getTLSModel(GV); 1977 1978 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) { 1979 unsigned HiTF = ((model == TLSModel::GeneralDynamic) 1980 ? SparcMCExpr::VK_Sparc_TLS_GD_HI22 1981 : SparcMCExpr::VK_Sparc_TLS_LDM_HI22); 1982 unsigned LoTF = ((model == TLSModel::GeneralDynamic) 1983 ? SparcMCExpr::VK_Sparc_TLS_GD_LO10 1984 : SparcMCExpr::VK_Sparc_TLS_LDM_LO10); 1985 unsigned addTF = ((model == TLSModel::GeneralDynamic) 1986 ? SparcMCExpr::VK_Sparc_TLS_GD_ADD 1987 : SparcMCExpr::VK_Sparc_TLS_LDM_ADD); 1988 unsigned callTF = ((model == TLSModel::GeneralDynamic) 1989 ? 
SparcMCExpr::VK_Sparc_TLS_GD_CALL 1990 : SparcMCExpr::VK_Sparc_TLS_LDM_CALL); 1991 1992 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG); 1993 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT); 1994 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo, 1995 withTargetFlags(Op, addTF, DAG)); 1996 1997 SDValue Chain = DAG.getEntryNode(); 1998 SDValue InFlag; 1999 2000 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(1, DL, true), DL); 2001 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag); 2002 InFlag = Chain.getValue(1); 2003 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT); 2004 SDValue Symbol = withTargetFlags(Op, callTF, DAG); 2005 2006 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2007 SmallVector<SDValue, 4> Ops; 2008 Ops.push_back(Chain); 2009 Ops.push_back(Callee); 2010 Ops.push_back(Symbol); 2011 Ops.push_back(DAG.getRegister(SP::O0, PtrVT)); 2012 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask( 2013 DAG.getMachineFunction(), CallingConv::C); 2014 assert(Mask && "Missing call preserved mask for calling convention"); 2015 Ops.push_back(DAG.getRegisterMask(Mask)); 2016 Ops.push_back(InFlag); 2017 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops); 2018 InFlag = Chain.getValue(1); 2019 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true), 2020 DAG.getIntPtrConstant(0, DL, true), InFlag, DL); 2021 InFlag = Chain.getValue(1); 2022 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag); 2023 2024 if (model != TLSModel::LocalDynamic) 2025 return Ret; 2026 2027 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT, 2028 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG)); 2029 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT, 2030 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG)); 2031 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo); 2032 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo, 2033 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG)); 2034 } 2035 2036 if (model == TLSModel::InitialExec) { 2037 unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX 2038 : SparcMCExpr::VK_Sparc_TLS_IE_LD); 2039 2040 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT); 2041 2042 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this 2043 // function has calls. 
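  // (The global base register is materialized with a call-like sequence
  // that reads the PC, so frame lowering must treat this function as one
  // that makes calls.)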
2044 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2045 MFI->setHasCalls(true); 2046 2047 SDValue TGA = makeHiLoPair(Op, 2048 SparcMCExpr::VK_Sparc_TLS_IE_HI22, 2049 SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG); 2050 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA); 2051 SDValue Offset = DAG.getNode(SPISD::TLS_LD, 2052 DL, PtrVT, Ptr, 2053 withTargetFlags(Op, ldTF, DAG)); 2054 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, 2055 DAG.getRegister(SP::G7, PtrVT), Offset, 2056 withTargetFlags(Op, 2057 SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG)); 2058 } 2059 2060 assert(model == TLSModel::LocalExec); 2061 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT, 2062 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG)); 2063 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT, 2064 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG)); 2065 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo); 2066 2067 return DAG.getNode(ISD::ADD, DL, PtrVT, 2068 DAG.getRegister(SP::G7, PtrVT), Offset); 2069} 2070 2071SDValue 2072SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args, 2073 SDValue Arg, SDLoc DL, 2074 SelectionDAG &DAG) const { 2075 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2076 EVT ArgVT = Arg.getValueType(); 2077 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2078 2079 ArgListEntry Entry; 2080 Entry.Node = Arg; 2081 Entry.Ty = ArgTy; 2082 2083 if (ArgTy->isFP128Ty()) { 2084 // Create a stack object and pass the pointer to the library function. 2085 int FI = MFI->CreateStackObject(16, 8, false); 2086 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 2087 Chain = DAG.getStore(Chain, 2088 DL, 2089 Entry.Node, 2090 FIPtr, 2091 MachinePointerInfo(), 2092 false, 2093 false, 2094 8); 2095 2096 Entry.Node = FIPtr; 2097 Entry.Ty = PointerType::getUnqual(ArgTy); 2098 } 2099 Args.push_back(Entry); 2100 return Chain; 2101} 2102 2103SDValue 2104SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG, 2105 const char *LibFuncName, 2106 unsigned numArgs) const { 2107 2108 ArgListTy Args; 2109 2110 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2111 auto PtrVT = getPointerTy(DAG.getDataLayout()); 2112 2113 SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT); 2114 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext()); 2115 Type *RetTyABI = RetTy; 2116 SDValue Chain = DAG.getEntryNode(); 2117 SDValue RetPtr; 2118 2119 if (RetTy->isFP128Ty()) { 2120 // Create a Stack Object to receive the return value of type f128. 2121 ArgListEntry Entry; 2122 int RetFI = MFI->CreateStackObject(16, 8, false); 2123 RetPtr = DAG.getFrameIndex(RetFI, PtrVT); 2124 Entry.Node = RetPtr; 2125 Entry.Ty = PointerType::getUnqual(RetTy); 2126 if (!Subtarget->is64Bit()) 2127 Entry.isSRet = true; 2128 Entry.isReturned = false; 2129 Args.push_back(Entry); 2130 RetTyABI = Type::getVoidTy(*DAG.getContext()); 2131 } 2132 2133 assert(Op->getNumOperands() >= numArgs && "Not enough operands!"); 2134 for (unsigned i = 0, e = numArgs; i != e; ++i) { 2135 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG); 2136 } 2137 TargetLowering::CallLoweringInfo CLI(DAG); 2138 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain) 2139 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args), 0); 2140 2141 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); 2142 2143 // chain is in second result. 
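  // If no sret temporary was needed, the call's first result already is the
  // return value; otherwise the callee wrote the f128 result through RetPtr
  // and it must be reloaded from the stack slot.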
2144 if (RetTyABI == RetTy) 2145 return CallInfo.first; 2146 2147 assert (RetTy->isFP128Ty() && "Unexpected return type!"); 2148 2149 Chain = CallInfo.second; 2150 2151 // Load RetPtr to get the return value. 2152 return DAG.getLoad(Op.getValueType(), 2153 SDLoc(Op), 2154 Chain, 2155 RetPtr, 2156 MachinePointerInfo(), 2157 false, false, false, 8); 2158} 2159 2160SDValue 2161SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS, 2162 unsigned &SPCC, 2163 SDLoc DL, 2164 SelectionDAG &DAG) const { 2165 2166 const char *LibCall = nullptr; 2167 bool is64Bit = Subtarget->is64Bit(); 2168 switch(SPCC) { 2169 default: llvm_unreachable("Unhandled conditional code!"); 2170 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break; 2171 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break; 2172 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break; 2173 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break; 2174 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break; 2175 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break; 2176 case SPCC::FCC_UL : 2177 case SPCC::FCC_ULE: 2178 case SPCC::FCC_UG : 2179 case SPCC::FCC_UGE: 2180 case SPCC::FCC_U : 2181 case SPCC::FCC_O : 2182 case SPCC::FCC_LG : 2183 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break; 2184 } 2185 2186 auto PtrVT = getPointerTy(DAG.getDataLayout()); 2187 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT); 2188 Type *RetTy = Type::getInt32Ty(*DAG.getContext()); 2189 ArgListTy Args; 2190 SDValue Chain = DAG.getEntryNode(); 2191 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG); 2192 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG); 2193 2194 TargetLowering::CallLoweringInfo CLI(DAG); 2195 CLI.setDebugLoc(DL).setChain(Chain) 2196 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0); 2197 2198 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); 2199 2200 // result is in first, and chain is in second result. 
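  // The comparison libcall is assumed to return the SPARC software
  // floating-point comparison code: 0 for equal, 1 for less, 2 for greater
  // and 3 for unordered. The cases below recode that small integer into an
  // integer-condition-code compare usable by the branch/select lowering.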
2201 SDValue Result = CallInfo.first; 2202 2203 switch(SPCC) { 2204 default: { 2205 SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType()); 2206 SPCC = SPCC::ICC_NE; 2207 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2208 } 2209 case SPCC::FCC_UL : { 2210 SDValue Mask = DAG.getTargetConstant(1, DL, Result.getValueType()); 2211 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask); 2212 SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType()); 2213 SPCC = SPCC::ICC_NE; 2214 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2215 } 2216 case SPCC::FCC_ULE: { 2217 SDValue RHS = DAG.getTargetConstant(2, DL, Result.getValueType()); 2218 SPCC = SPCC::ICC_NE; 2219 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2220 } 2221 case SPCC::FCC_UG : { 2222 SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType()); 2223 SPCC = SPCC::ICC_G; 2224 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2225 } 2226 case SPCC::FCC_UGE: { 2227 SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType()); 2228 SPCC = SPCC::ICC_NE; 2229 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2230 } 2231 2232 case SPCC::FCC_U : { 2233 SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType()); 2234 SPCC = SPCC::ICC_E; 2235 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2236 } 2237 case SPCC::FCC_O : { 2238 SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType()); 2239 SPCC = SPCC::ICC_NE; 2240 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2241 } 2242 case SPCC::FCC_LG : { 2243 SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType()); 2244 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask); 2245 SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType()); 2246 SPCC = SPCC::ICC_NE; 2247 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2248 } 2249 case SPCC::FCC_UE : { 2250 SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType()); 2251 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask); 2252 SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType()); 2253 SPCC = SPCC::ICC_E; 2254 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS); 2255 } 2256 } 2257} 2258 2259static SDValue 2260LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG, 2261 const SparcTargetLowering &TLI) { 2262 2263 if (Op.getOperand(0).getValueType() == MVT::f64) 2264 return TLI.LowerF128Op(Op, DAG, 2265 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1); 2266 2267 if (Op.getOperand(0).getValueType() == MVT::f32) 2268 return TLI.LowerF128Op(Op, DAG, 2269 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1); 2270 2271 llvm_unreachable("fpextend with non-float operand!"); 2272 return SDValue(); 2273} 2274 2275static SDValue 2276LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG, 2277 const SparcTargetLowering &TLI) { 2278 // FP_ROUND on f64 and f32 are legal. 
  if (Op.getOperand(0).getValueType() != MVT::f128)
    return Op;

  if (Op.getValueType() == MVT::f64)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
  if (Op.getValueType() == MVT::f32)
    return TLI.LowerF128Op(Op, DAG,
                           TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);

  llvm_unreachable("fpround to non-float!");
  return SDValue();
}

static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  assert(VT == MVT::i32 || VT == MVT::i64);

  // Expand f128 operations to fp128 ABI calls.
  if (Op.getOperand(0).getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
    const char *libName = TLI.getLibcallName(VT == MVT::i32
                                             ? RTLIB::FPTOSINT_F128_I32
                                             : RTLIB::FPTOSINT_F128_I64);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the resulting type is illegal.
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  // Otherwise, convert the fp value to integer in an FP register.
  if (VT == MVT::i32)
    Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
  else
    Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));

  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}

static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || OpVT == MVT::i64);

  EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;

  // Expand f128 operations to fp128 ABI calls.
  if (Op.getValueType() == MVT::f128
      && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
    const char *libName = TLI.getLibcallName(OpVT == MVT::i32
                                             ? RTLIB::SINTTOFP_I32_F128
                                             : RTLIB::SINTTOFP_I64_F128);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the operand type is illegal.
  if (!TLI.isTypeLegal(OpVT))
    return SDValue();

  // Otherwise, convert the int value to FP in an FP register.
  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
  unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
}

static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the resulting type is legal.
  if (Op.getOperand(0).getValueType() != MVT::f128 ||
      (hasHardQuad && TLI.isTypeLegal(VT)))
    return SDValue();

  assert(VT == MVT::i32 || VT == MVT::i64);

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(VT == MVT::i32
                                            ? RTLIB::FPTOUINT_F128_I32
                                            : RTLIB::FPTOUINT_F128_I64),
                         1);
}

static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || OpVT == MVT::i64);

  // Expand if it does not involve f128 or the target has support for
  // quad floating point instructions and the operand type is legal.
  if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
    return SDValue();

  return TLI.LowerF128Op(Op, DAG,
                         TLI.getLibcallName(OpVT == MVT::i32
                                            ? RTLIB::UINTTOFP_I32_F128
                                            : RTLIB::UINTTOFP_I64_F128),
                         1);
}

static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
                          const SparcTargetLowering &TLI,
                          bool hasHardQuad) {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a br_cc of a "setcc", and if the setcc got lowered into
  // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);

  // Get the condition flag.
  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
    // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
    Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
  } else {
    if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::BRICC;
    } else {
      CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      Opc = SPISD::BRFCC;
    }
  }
  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}

static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
                              const SparcTargetLowering &TLI,
                              bool hasHardQuad) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  SDLoc dl(Op);
  unsigned Opc, SPCC = ~0U;

  // If this is a select_cc of a "setcc", and if the setcc got lowered into
  // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
  LookThroughSetCC(LHS, RHS, CC, SPCC);

  SDValue CompareFlag;
  if (LHS.getValueType().isInteger()) {
    CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
    Opc = LHS.getValueType() == MVT::i32 ?
2446 SPISD::SELECT_ICC : SPISD::SELECT_XCC; 2447 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC); 2448 } else { 2449 if (!hasHardQuad && LHS.getValueType() == MVT::f128) { 2450 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC); 2451 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG); 2452 Opc = SPISD::SELECT_ICC; 2453 } else { 2454 CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS); 2455 Opc = SPISD::SELECT_FCC; 2456 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC); 2457 } 2458 } 2459 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal, 2460 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag); 2461} 2462 2463static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, 2464 const SparcTargetLowering &TLI) { 2465 MachineFunction &MF = DAG.getMachineFunction(); 2466 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>(); 2467 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout()); 2468 2469 // Need frame address to find the address of VarArgsFrameIndex. 2470 MF.getFrameInfo()->setFrameAddressIsTaken(true); 2471 2472 // vastart just stores the address of the VarArgsFrameIndex slot into the 2473 // memory location argument. 2474 SDLoc DL(Op); 2475 SDValue Offset = 2476 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT), 2477 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL)); 2478 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2479 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1), 2480 MachinePointerInfo(SV), false, false, 0); 2481} 2482 2483static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) { 2484 SDNode *Node = Op.getNode(); 2485 EVT VT = Node->getValueType(0); 2486 SDValue InChain = Node->getOperand(0); 2487 SDValue VAListPtr = Node->getOperand(1); 2488 EVT PtrVT = VAListPtr.getValueType(); 2489 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2490 SDLoc DL(Node); 2491 SDValue VAList = DAG.getLoad(PtrVT, DL, InChain, VAListPtr, 2492 MachinePointerInfo(SV), false, false, false, 0); 2493 // Increment the pointer, VAList, to the next vaarg. 2494 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, 2495 DAG.getIntPtrConstant(VT.getSizeInBits()/8, 2496 DL)); 2497 // Store the incremented VAList to the legalized pointer. 2498 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, 2499 VAListPtr, MachinePointerInfo(SV), false, false, 0); 2500 // Load the actual argument out of the pointer VAList. 2501 // We can't count on greater alignment than the word size. 2502 return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(), 2503 false, false, false, 2504 std::min(PtrVT.getSizeInBits(), VT.getSizeInBits())/8); 2505} 2506 2507static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, 2508 const SparcSubtarget *Subtarget) { 2509 SDValue Chain = Op.getOperand(0); // Legalize the chain. 2510 SDValue Size = Op.getOperand(1); // Legalize the size. 2511 EVT VT = Size->getValueType(0); 2512 SDLoc dl(Op); 2513 2514 unsigned SPReg = SP::O6; 2515 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); 2516 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value 2517 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain 2518 2519 // The resultant pointer is actually 16 words from the bottom of the stack, 2520 // to provide a register spill area. 2521 unsigned regSpillArea = Subtarget->is64Bit() ? 
128 : 96; 2522 regSpillArea += Subtarget->getStackPointerBias(); 2523 2524 SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP, 2525 DAG.getConstant(regSpillArea, dl, VT)); 2526 SDValue Ops[2] = { NewVal, Chain }; 2527 return DAG.getMergeValues(Ops, dl); 2528} 2529 2530 2531static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) { 2532 SDLoc dl(Op); 2533 SDValue Chain = DAG.getNode(SPISD::FLUSHW, 2534 dl, MVT::Other, DAG.getEntryNode()); 2535 return Chain; 2536} 2537 2538static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG, 2539 const SparcSubtarget *Subtarget) { 2540 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2541 MFI->setFrameAddressIsTaken(true); 2542 2543 EVT VT = Op.getValueType(); 2544 SDLoc dl(Op); 2545 unsigned FrameReg = SP::I6; 2546 unsigned stackBias = Subtarget->getStackPointerBias(); 2547 2548 SDValue FrameAddr; 2549 2550 if (depth == 0) { 2551 FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 2552 if (Subtarget->is64Bit()) 2553 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr, 2554 DAG.getIntPtrConstant(stackBias, dl)); 2555 return FrameAddr; 2556 } 2557 2558 // flush first to make sure the windowed registers' values are in stack 2559 SDValue Chain = getFLUSHW(Op, DAG); 2560 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT); 2561 2562 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56; 2563 2564 while (depth--) { 2565 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr, 2566 DAG.getIntPtrConstant(Offset, dl)); 2567 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo(), 2568 false, false, false, 0); 2569 } 2570 if (Subtarget->is64Bit()) 2571 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr, 2572 DAG.getIntPtrConstant(stackBias, dl)); 2573 return FrameAddr; 2574} 2575 2576 2577static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, 2578 const SparcSubtarget *Subtarget) { 2579 2580 uint64_t depth = Op.getConstantOperandVal(0); 2581 2582 return getFRAMEADDR(depth, Op, DAG, Subtarget); 2583 2584} 2585 2586static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, 2587 const SparcTargetLowering &TLI, 2588 const SparcSubtarget *Subtarget) { 2589 MachineFunction &MF = DAG.getMachineFunction(); 2590 MachineFrameInfo *MFI = MF.getFrameInfo(); 2591 MFI->setReturnAddressIsTaken(true); 2592 2593 if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG)) 2594 return SDValue(); 2595 2596 EVT VT = Op.getValueType(); 2597 SDLoc dl(Op); 2598 uint64_t depth = Op.getConstantOperandVal(0); 2599 2600 SDValue RetAddr; 2601 if (depth == 0) { 2602 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout()); 2603 unsigned RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT)); 2604 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT); 2605 return RetAddr; 2606 } 2607 2608 // Need frame address to find return address of the caller. 2609 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget); 2610 2611 unsigned Offset = (Subtarget->is64Bit()) ? 
120 : 60; 2612 SDValue Ptr = DAG.getNode(ISD::ADD, 2613 dl, VT, 2614 FrameAddr, 2615 DAG.getIntPtrConstant(Offset, dl)); 2616 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, 2617 MachinePointerInfo(), false, false, false, 0); 2618 2619 return RetAddr; 2620} 2621 2622static SDValue LowerF64Op(SDValue Op, SelectionDAG &DAG, unsigned opcode) 2623{ 2624 SDLoc dl(Op); 2625 2626 assert(Op.getValueType() == MVT::f64 && "LowerF64Op called on non-double!"); 2627 assert(opcode == ISD::FNEG || opcode == ISD::FABS); 2628 2629 // Lower fneg/fabs on f64 to fneg/fabs on f32. 2630 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd. 2631 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd. 2632 2633 SDValue SrcReg64 = Op.getOperand(0); 2634 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32, 2635 SrcReg64); 2636 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32, 2637 SrcReg64); 2638 2639 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32); 2640 2641 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, 2642 dl, MVT::f64), 0); 2643 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64, 2644 DstReg64, Hi32); 2645 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64, 2646 DstReg64, Lo32); 2647 return DstReg64; 2648} 2649 2650// Lower a f128 load into two f64 loads. 2651static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG) 2652{ 2653 SDLoc dl(Op); 2654 LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode()); 2655 assert(LdNode && LdNode->getOffset().getOpcode() == ISD::UNDEF 2656 && "Unexpected node type"); 2657 2658 unsigned alignment = LdNode->getAlignment(); 2659 if (alignment > 8) 2660 alignment = 8; 2661 2662 SDValue Hi64 = DAG.getLoad(MVT::f64, 2663 dl, 2664 LdNode->getChain(), 2665 LdNode->getBasePtr(), 2666 LdNode->getPointerInfo(), 2667 false, false, false, alignment); 2668 EVT addrVT = LdNode->getBasePtr().getValueType(); 2669 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT, 2670 LdNode->getBasePtr(), 2671 DAG.getConstant(8, dl, addrVT)); 2672 SDValue Lo64 = DAG.getLoad(MVT::f64, 2673 dl, 2674 LdNode->getChain(), 2675 LoPtr, 2676 LdNode->getPointerInfo(), 2677 false, false, false, alignment); 2678 2679 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32); 2680 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32); 2681 2682 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, 2683 dl, MVT::f128); 2684 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl, 2685 MVT::f128, 2686 SDValue(InFP128, 0), 2687 Hi64, 2688 SubRegEven); 2689 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl, 2690 MVT::f128, 2691 SDValue(InFP128, 0), 2692 Lo64, 2693 SubRegOdd); 2694 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1), 2695 SDValue(Lo64.getNode(), 1) }; 2696 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 2697 SDValue Ops[2] = {SDValue(InFP128,0), OutChain}; 2698 return DAG.getMergeValues(Ops, dl); 2699} 2700 2701static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) 2702{ 2703 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode()); 2704 2705 EVT MemVT = LdNode->getMemoryVT(); 2706 if (MemVT == MVT::f128) 2707 return LowerF128Load(Op, DAG); 2708 2709 return Op; 2710} 2711 2712// Lower a f128 store into two f64 stores. 
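// The value is split into its even and odd f64 subregisters; the even
// (high) half is stored at the original address and the odd (low) half at
// address+8, mirroring LowerF128Load above.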
2713static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) { 2714 SDLoc dl(Op); 2715 StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode()); 2716 assert(StNode && StNode->getOffset().getOpcode() == ISD::UNDEF 2717 && "Unexpected node type"); 2718 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32); 2719 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32); 2720 2721 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, 2722 dl, 2723 MVT::f64, 2724 StNode->getValue(), 2725 SubRegEven); 2726 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, 2727 dl, 2728 MVT::f64, 2729 StNode->getValue(), 2730 SubRegOdd); 2731 2732 unsigned alignment = StNode->getAlignment(); 2733 if (alignment > 8) 2734 alignment = 8; 2735 2736 SDValue OutChains[2]; 2737 OutChains[0] = DAG.getStore(StNode->getChain(), 2738 dl, 2739 SDValue(Hi64, 0), 2740 StNode->getBasePtr(), 2741 MachinePointerInfo(), 2742 false, false, alignment); 2743 EVT addrVT = StNode->getBasePtr().getValueType(); 2744 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT, 2745 StNode->getBasePtr(), 2746 DAG.getConstant(8, dl, addrVT)); 2747 OutChains[1] = DAG.getStore(StNode->getChain(), 2748 dl, 2749 SDValue(Lo64, 0), 2750 LoPtr, 2751 MachinePointerInfo(), 2752 false, false, alignment); 2753 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); 2754} 2755 2756static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) 2757{ 2758 SDLoc dl(Op); 2759 StoreSDNode *St = cast<StoreSDNode>(Op.getNode()); 2760 2761 EVT MemVT = St->getMemoryVT(); 2762 if (MemVT == MVT::f128) 2763 return LowerF128Store(Op, DAG); 2764 2765 if (MemVT == MVT::i64) { 2766 // Custom handling for i64 stores: turn it into a bitcast and a 2767 // v2i32 store. 2768 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue()); 2769 SDValue Chain = DAG.getStore( 2770 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(), 2771 St->isVolatile(), St->isNonTemporal(), St->getAlignment(), 2772 St->getAAInfo()); 2773 return Chain; 2774 } 2775 2776 return SDValue(); 2777} 2778 2779static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) { 2780 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS) 2781 && "invalid opcode"); 2782 2783 if (Op.getValueType() == MVT::f64) 2784 return LowerF64Op(Op, DAG, Op.getOpcode()); 2785 if (Op.getValueType() != MVT::f128) 2786 return Op; 2787 2788 // Lower fabs/fneg on f128 to fabs/fneg on f64 2789 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64 2790 2791 SDLoc dl(Op); 2792 SDValue SrcReg128 = Op.getOperand(0); 2793 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64, 2794 SrcReg128); 2795 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64, 2796 SrcReg128); 2797 if (isV9) 2798 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64); 2799 else 2800 Hi64 = LowerF64Op(Hi64, DAG, Op.getOpcode()); 2801 2802 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, 2803 dl, MVT::f128), 0); 2804 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128, 2805 DstReg128, Hi64); 2806 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128, 2807 DstReg128, Lo64); 2808 return DstReg128; 2809} 2810 2811static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 2812 2813 if (Op.getValueType() != MVT::i64) 2814 return Op; 2815 2816 SDLoc dl(Op); 2817 SDValue Src1 = Op.getOperand(0); 2818 SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, 
Src1); 2819 SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1, 2820 DAG.getConstant(32, dl, MVT::i64)); 2821 Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi); 2822 2823 SDValue Src2 = Op.getOperand(1); 2824 SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2); 2825 SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2, 2826 DAG.getConstant(32, dl, MVT::i64)); 2827 Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi); 2828 2829 2830 bool hasChain = false; 2831 unsigned hiOpc = Op.getOpcode(); 2832 switch (Op.getOpcode()) { 2833 default: llvm_unreachable("Invalid opcode"); 2834 case ISD::ADDC: hiOpc = ISD::ADDE; break; 2835 case ISD::ADDE: hasChain = true; break; 2836 case ISD::SUBC: hiOpc = ISD::SUBE; break; 2837 case ISD::SUBE: hasChain = true; break; 2838 } 2839 SDValue Lo; 2840 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue); 2841 if (hasChain) { 2842 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo, 2843 Op.getOperand(2)); 2844 } else { 2845 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo); 2846 } 2847 SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1)); 2848 SDValue Carry = Hi.getValue(1); 2849 2850 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo); 2851 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi); 2852 Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi, 2853 DAG.getConstant(32, dl, MVT::i64)); 2854 2855 SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo); 2856 SDValue Ops[2] = { Dst, Carry }; 2857 return DAG.getMergeValues(Ops, dl); 2858} 2859 2860// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode() 2861// in LegalizeDAG.cpp except the order of arguments to the library function. 2862static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG, 2863 const SparcTargetLowering &TLI) 2864{ 2865 unsigned opcode = Op.getOpcode(); 2866 assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode."); 2867 2868 bool isSigned = (opcode == ISD::SMULO); 2869 EVT VT = MVT::i64; 2870 EVT WideVT = MVT::i128; 2871 SDLoc dl(Op); 2872 SDValue LHS = Op.getOperand(0); 2873 2874 if (LHS.getValueType() != VT) 2875 return Op; 2876 2877 SDValue ShiftAmt = DAG.getConstant(63, dl, VT); 2878 2879 SDValue RHS = Op.getOperand(1); 2880 SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt); 2881 SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt); 2882 SDValue Args[] = { HiLHS, LHS, HiRHS, RHS }; 2883 2884 SDValue MulResult = TLI.makeLibCall(DAG, 2885 RTLIB::MUL_I128, WideVT, 2886 Args, isSigned, dl).first; 2887 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, 2888 MulResult, DAG.getIntPtrConstant(0, dl)); 2889 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, 2890 MulResult, DAG.getIntPtrConstant(1, dl)); 2891 if (isSigned) { 2892 SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt); 2893 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE); 2894 } else { 2895 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT), 2896 ISD::SETNE); 2897 } 2898 // MulResult is a node with an illegal type. Because such things are not 2899 // generally permitted during this phase of legalization, ensure that 2900 // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have 2901 // been folded. 
2902 assert(MulResult->use_empty() && "Illegally typed node still in use!"); 2903 2904 SDValue Ops[2] = { BottomHalf, TopHalf } ; 2905 return DAG.getMergeValues(Ops, dl); 2906} 2907 2908static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) { 2909 // Monotonic load/stores are legal. 2910 if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic) 2911 return Op; 2912 2913 // Otherwise, expand with a fence. 2914 return SDValue(); 2915} 2916 2917SDValue SparcTargetLowering:: 2918LowerOperation(SDValue Op, SelectionDAG &DAG) const { 2919 2920 bool hasHardQuad = Subtarget->hasHardQuad(); 2921 bool isV9 = Subtarget->isV9(); 2922 2923 switch (Op.getOpcode()) { 2924 default: llvm_unreachable("Should not custom lower this!"); 2925 2926 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this, 2927 Subtarget); 2928 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG, 2929 Subtarget); 2930 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 2931 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 2932 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 2933 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 2934 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this, 2935 hasHardQuad); 2936 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this, 2937 hasHardQuad); 2938 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this, 2939 hasHardQuad); 2940 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this, 2941 hasHardQuad); 2942 case ISD::BR_CC: return LowerBR_CC(Op, DAG, *this, 2943 hasHardQuad); 2944 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this, 2945 hasHardQuad); 2946 case ISD::VASTART: return LowerVASTART(Op, DAG, *this); 2947 case ISD::VAARG: return LowerVAARG(Op, DAG); 2948 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG, 2949 Subtarget); 2950 2951 case ISD::LOAD: return LowerLOAD(Op, DAG); 2952 case ISD::STORE: return LowerSTORE(Op, DAG); 2953 case ISD::FADD: return LowerF128Op(Op, DAG, 2954 getLibcallName(RTLIB::ADD_F128), 2); 2955 case ISD::FSUB: return LowerF128Op(Op, DAG, 2956 getLibcallName(RTLIB::SUB_F128), 2); 2957 case ISD::FMUL: return LowerF128Op(Op, DAG, 2958 getLibcallName(RTLIB::MUL_F128), 2); 2959 case ISD::FDIV: return LowerF128Op(Op, DAG, 2960 getLibcallName(RTLIB::DIV_F128), 2); 2961 case ISD::FSQRT: return LowerF128Op(Op, DAG, 2962 getLibcallName(RTLIB::SQRT_F128),1); 2963 case ISD::FABS: 2964 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9); 2965 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this); 2966 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this); 2967 case ISD::ADDC: 2968 case ISD::ADDE: 2969 case ISD::SUBC: 2970 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 2971 case ISD::UMULO: 2972 case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this); 2973 case ISD::ATOMIC_LOAD: 2974 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG); 2975 } 2976} 2977 2978MachineBasicBlock * 2979SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 2980 MachineBasicBlock *BB) const { 2981 switch (MI->getOpcode()) { 2982 default: llvm_unreachable("Unknown SELECT_CC!"); 2983 case SP::SELECT_CC_Int_ICC: 2984 case SP::SELECT_CC_FP_ICC: 2985 case SP::SELECT_CC_DFP_ICC: 2986 case SP::SELECT_CC_QFP_ICC: 2987 return expandSelectCC(MI, BB, SP::BCOND); 2988 case SP::SELECT_CC_Int_FCC: 2989 case SP::SELECT_CC_FP_FCC: 2990 case SP::SELECT_CC_DFP_FCC: 2991 case SP::SELECT_CC_QFP_FCC: 2992 return expandSelectCC(MI, BB, SP::FBCOND); 2993 2994 
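  // Atomic read-modify-write pseudos expand to a compare-and-swap loop;
  // expandAtomicRMW builds the loop from the plain opcode given here and,
  // for the min/max variants, a conditional-move condition code.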
  case SP::ATOMIC_LOAD_ADD_32:
    return expandAtomicRMW(MI, BB, SP::ADDrr);
  case SP::ATOMIC_LOAD_ADD_64:
    return expandAtomicRMW(MI, BB, SP::ADDXrr);
  case SP::ATOMIC_LOAD_SUB_32:
    return expandAtomicRMW(MI, BB, SP::SUBrr);
  case SP::ATOMIC_LOAD_SUB_64:
    return expandAtomicRMW(MI, BB, SP::SUBXrr);
  case SP::ATOMIC_LOAD_AND_32:
    return expandAtomicRMW(MI, BB, SP::ANDrr);
  case SP::ATOMIC_LOAD_AND_64:
    return expandAtomicRMW(MI, BB, SP::ANDXrr);
  case SP::ATOMIC_LOAD_OR_32:
    return expandAtomicRMW(MI, BB, SP::ORrr);
  case SP::ATOMIC_LOAD_OR_64:
    return expandAtomicRMW(MI, BB, SP::ORXrr);
  case SP::ATOMIC_LOAD_XOR_32:
    return expandAtomicRMW(MI, BB, SP::XORrr);
  case SP::ATOMIC_LOAD_XOR_64:
    return expandAtomicRMW(MI, BB, SP::XORXrr);
  case SP::ATOMIC_LOAD_NAND_32:
    return expandAtomicRMW(MI, BB, SP::ANDrr);
  case SP::ATOMIC_LOAD_NAND_64:
    return expandAtomicRMW(MI, BB, SP::ANDXrr);

  case SP::ATOMIC_SWAP_64:
    return expandAtomicRMW(MI, BB, 0);

  case SP::ATOMIC_LOAD_MAX_32:
    return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_G);
  case SP::ATOMIC_LOAD_MAX_64:
    return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_G);
  case SP::ATOMIC_LOAD_MIN_32:
    return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LE);
  case SP::ATOMIC_LOAD_MIN_64:
    return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LE);
  case SP::ATOMIC_LOAD_UMAX_32:
    return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_GU);
  case SP::ATOMIC_LOAD_UMAX_64:
    return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_GU);
  case SP::ATOMIC_LOAD_UMIN_32:
    return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LEU);
  case SP::ATOMIC_LOAD_UMIN_64:
    return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LEU);
  }
}

MachineBasicBlock*
SparcTargetLowering::expandSelectCC(MachineInstr *MI,
                                    MachineBasicBlock *BB,
                                    unsigned BROpcode) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  unsigned CC = (SPCC::CondCodes)MI->getOperand(3).getImm();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  // thisMBB:
  //  ...
  //   TrueVal = ...
  //   [f]bCC sinkMBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(BROpcode)).addMBB(sinkMBB).addImm(CC);

  // copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges.
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl, TII.get(SP::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(1).getReg()).addMBB(thisMBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

MachineBasicBlock*
SparcTargetLowering::expandAtomicRMW(MachineInstr *MI,
                                     MachineBasicBlock *MBB,
                                     unsigned Opcode,
                                     unsigned CondCode) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  // MI is an atomic read-modify-write instruction of the form:
  //
  //   rd = atomicrmw<op> addr, rs2
  //
  // All three operands are registers.
  unsigned DestReg = MI->getOperand(0).getReg();
  unsigned AddrReg = MI->getOperand(1).getReg();
  unsigned Rs2Reg  = MI->getOperand(2).getReg();

  // SelectionDAG has already inserted memory barriers before and after MI, so
  // we simply have to implement the operation in terms of compare-and-swap:
  //
  //   %val0 = load %addr
  // loop:
  //   %val = phi %val0, %dest
  //   %upd = op %val, %rs2
  //   %dest = cas %addr, %val, %upd
  //   cmp %val, %dest
  //   bne loop
  // done:
  //
  bool is64Bit = SP::I64RegsRegClass.hasSubClassEq(MRI.getRegClass(DestReg));
  const TargetRegisterClass *ValueRC =
    is64Bit ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
  unsigned Val0Reg = MRI.createVirtualRegister(ValueRC);

  BuildMI(*MBB, MI, DL, TII.get(is64Bit ? SP::LDXri : SP::LDri), Val0Reg)
    .addReg(AddrReg).addImm(0);

  // Split the basic block MBB before MI and insert the loop block in the hole.
  MachineFunction::iterator MFI = MBB->getIterator();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction *MF = MBB->getParent();
  MachineBasicBlock *LoopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *DoneMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  ++MFI;
  MF->insert(MFI, LoopMBB);
  MF->insert(MFI, DoneMBB);

  // Move MI and following instructions to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), MBB, MI, MBB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Connect the CFG again.
  MBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(DoneMBB);

  // Build the loop block.
  unsigned ValReg = MRI.createVirtualRegister(ValueRC);
  // Opcode == 0 means try to write Rs2Reg directly (ATOMIC_SWAP).
  unsigned UpdReg = (Opcode ? MRI.createVirtualRegister(ValueRC) : Rs2Reg);

  BuildMI(LoopMBB, DL, TII.get(SP::PHI), ValReg)
    .addReg(Val0Reg).addMBB(MBB)
    .addReg(DestReg).addMBB(LoopMBB);

  if (CondCode) {
    // This is one of the min/max operations. We need a CMPrr followed by a
    // MOVXCC/MOVICC.
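    // Roughly, the conditional move below computes
    //   %upd = cond(%val, %rs2) ? %val : %rs2
    // so ICC_G yields max, ICC_LE yields min, and ICC_GU/ICC_LEU the
    // unsigned variants.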
    BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(Rs2Reg);
    BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
      .addReg(ValReg).addReg(Rs2Reg).addImm(CondCode);
  } else if (Opcode) {
    BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
      .addReg(ValReg).addReg(Rs2Reg);
  }

  if (MI->getOpcode() == SP::ATOMIC_LOAD_NAND_32 ||
      MI->getOpcode() == SP::ATOMIC_LOAD_NAND_64) {
    // NAND is the complement of the AND result computed above.
    unsigned TmpReg = UpdReg;
    UpdReg = MRI.createVirtualRegister(ValueRC);
    BuildMI(LoopMBB, DL, TII.get(SP::XORri), UpdReg).addReg(TmpReg).addImm(-1);
  }

  BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::CASXrr : SP::CASrr), DestReg)
    .addReg(AddrReg).addReg(ValReg).addReg(UpdReg)
    .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(DestReg);
  BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::BPXCC : SP::BCOND))
    .addMBB(LoopMBB).addImm(SPCC::ICC_NE);

  MI->eraseFromParent();
  return DoneMBB;
}

//===----------------------------------------------------------------------===//
// Sparc Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
SparcTargetLowering::ConstraintType
SparcTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'r': return C_RegisterClass;
    case 'I': // SIMM13
      return C_Other;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SparcTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;

  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'I': // SIMM13
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (isInt<13>(C->getSExtValue()))
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void SparcTargetLowering::
LowerAsmOperandForConstraint(SDValue Op,
                             std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  SDValue Result(nullptr, 0);

  // Only support length 1 constraints for now.
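  // As an illustration (hypothetical user code), the 'I' operand in
  //   asm("add %1, %2, %0" : "=r"(res) : "r"(x), "I"(1023));
  // is turned into a target constant below because 1023 fits in a signed
  // 13-bit immediate; an out-of-range value is rejected by leaving Ops
  // untouched.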
  if (Constraint.length() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I': // SIMM13
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<13>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
      return;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass *>
SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT == MVT::v2i32)
        return std::make_pair(0U, &SP::IntPairRegClass);
      else
        return std::make_pair(0U, &SP::IntRegsRegClass);
    }
  } else if (!Constraint.empty() && Constraint.size() <= 5
              && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
    // constraint = '{r<d>}'
    // Remove the braces from around the name.
    StringRef name(Constraint.data()+1, Constraint.size()-2);
    // Handle register aliases:
    //       r0-r7   -> g0-g7
    //       r8-r15  -> o0-o7
    //       r16-r23 -> l0-l7
    //       r24-r31 -> i0-i7
    uint64_t intVal = 0;
    if (name.substr(0, 1).equals("r")
        && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
      const char regTypes[] = { 'g', 'o', 'l', 'i' };
      char regType = regTypes[intVal/8];
      char regIdx = '0' + (intVal % 8);
      char tmp[] = { '{', regType, regIdx, '}', 0 };
      std::string newConstraint = std::string(tmp);
      return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
                                                          VT);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool
SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Sparc target isn't yet aware of offsets.
  return false;
}

void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>& Results,
                                             SelectionDAG &DAG) const {

  SDLoc dl(N);

  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // Custom lower only if it involves both f128 and i64.
    if (N->getOperand(0).getValueType() != MVT::f128
        || N->getValueType(0) != MVT::i64)
      return;
    libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
               ? RTLIB::FPTOSINT_F128_I64
               : RTLIB::FPTOUINT_F128_I64);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;

  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // Custom lower only if it involves both f128 and i64.
    if (N->getValueType(0) != MVT::f128
        || N->getOperand(0).getValueType() != MVT::i64)
      return;

    libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
               ? RTLIB::SINTTOFP_I64_F128
               : RTLIB::UINTTOFP_I64_F128);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::LOAD: {
    LoadSDNode *Ld = cast<LoadSDNode>(N);
    // Custom handling only for i64: turn i64 load into a v2i32 load,
    // and a bitcast.
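    // Roughly:
    //   (i64 (load %ptr))  -->  (i64 (bitcast (v2i32 (load %ptr))))
    // so that the legal v2i32 load can be selected to an 'ldd' instruction.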
    if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
      return;

    SDLoc dl(N);
    SDValue LoadRes = DAG.getExtLoad(
        Ld->getExtensionType(), dl, MVT::v2i32,
        Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
        MVT::v2i32, Ld->isVolatile(), Ld->isNonTemporal(),
        Ld->isInvariant(), Ld->getAlignment(), Ld->getAAInfo());

    SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
    Results.push_back(Res);
    Results.push_back(LoadRes.getValue(1));
    return;
  }
  }
}