//===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Hexagon uses to lower LLVM code
// into a selection DAG.
//
//===----------------------------------------------------------------------===//

#include "HexagonISelLowering.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"
#include "HexagonTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

const unsigned Hexagon_MAX_RET_SIZE = 64;

static cl::opt<bool>
EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden,
               cl::desc("Control jump table emission on Hexagon target"));

int NumNamedVarArgParams = -1;

// Implement calling convention for Hexagon.
static bool
CC_Hexagon(unsigned ValNo, MVT ValVT,
           MVT LocVT, CCValAssign::LocInfo LocInfo,
           ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
CC_Hexagon32(unsigned ValNo, MVT ValVT,
             MVT LocVT, CCValAssign::LocInfo LocInfo,
             ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
CC_Hexagon64(unsigned ValNo, MVT ValVT,
             MVT LocVT, CCValAssign::LocInfo LocInfo,
             ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
RetCC_Hexagon(unsigned ValNo, MVT ValVT,
              MVT LocVT, CCValAssign::LocInfo LocInfo,
              ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
                MVT LocVT, CCValAssign::LocInfo LocInfo,
                ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
                MVT LocVT, CCValAssign::LocInfo LocInfo,
                ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
CC_Hexagon_VarArg(unsigned ValNo, MVT ValVT,
                  MVT LocVT, CCValAssign::LocInfo LocInfo,
                  ISD::ArgFlagsTy ArgFlags, CCState &State) {

  // NumNamedVarArgParams cannot be zero for a VarArg function.
  assert((NumNamedVarArgParams > 0) &&
         "NumNamedVarArgParams is not bigger than zero.");

  if ((int)ValNo < NumNamedVarArgParams) {
    // Deal with named arguments.
    return CC_Hexagon(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
  }

  // Deal with unnamed arguments.
  unsigned ofst;
  if (ArgFlags.isByVal()) {
    // If passed by value, the size allocated on the stack is decided
    // by ArgFlags.getByValSize(), not by the size of LocVT.
    assert((ArgFlags.getByValSize() > 8) &&
           "ByValSize must be bigger than 8 bytes");
    ofst = State.AllocateStack(ArgFlags.getByValSize(), 4);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
    return false;
  }
  if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
    LocVT = MVT::i32;
    ValVT = MVT::i32;
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
    else
      LocInfo = CCValAssign::AExt;
  }
  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    ofst = State.AllocateStack(4, 4);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
    return false;
  }
  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    ofst = State.AllocateStack(8, 8);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
    return false;
  }
  llvm_unreachable(0);
}


static bool
CC_Hexagon(unsigned ValNo, MVT ValVT,
           MVT LocVT, CCValAssign::LocInfo LocInfo,
           ISD::ArgFlagsTy ArgFlags, CCState &State) {

  if (ArgFlags.isByVal()) {
    // Passed on stack.
    assert((ArgFlags.getByValSize() > 8) &&
           "ByValSize must be bigger than 8 bytes");
    unsigned Offset = State.AllocateStack(ArgFlags.getByValSize(), 4);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }

  if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
    LocVT = MVT::i32;
    ValVT = MVT::i32;
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
    else
      LocInfo = CCValAssign::AExt;
  }

  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    if (!CC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false;
  }

  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    if (!CC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false;
  }

  return true;  // CC didn't match.
}
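
// Illustration only (not used by the lowering itself): these hand-written
// routines stand in for TableGen'd calling-convention functions and are
// driven through CCState, roughly as follows (MF, TM and Context stand for
// the usual MachineFunction, TargetMachine and LLVMContext handles):
//
//   SmallVector<CCValAssign, 16> ArgLocs;
//   CCState CCInfo(CallConv, isVarArg, MF, TM, ArgLocs, Context);
//   CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
//
// Each resulting CCValAssign records whether an argument was assigned to a
// register (R0-R5 for 32-bit values, D0-D2 for 64-bit values; see below) or
// to a stack offset.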

static bool CC_Hexagon32(unsigned ValNo, MVT ValVT,
                         MVT LocVT, CCValAssign::LocInfo LocInfo,
                         ISD::ArgFlagsTy ArgFlags, CCState &State) {

  static const uint16_t RegList[] = {
    Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
    Hexagon::R5
  };
  if (unsigned Reg = State.AllocateReg(RegList, 6)) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  unsigned Offset = State.AllocateStack(4, 4);
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false;
}

static bool CC_Hexagon64(unsigned ValNo, MVT ValVT,
                         MVT LocVT, CCValAssign::LocInfo LocInfo,
                         ISD::ArgFlagsTy ArgFlags, CCState &State) {

  if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  static const uint16_t RegList1[] = {
    Hexagon::D1, Hexagon::D2
  };
  static const uint16_t RegList2[] = {
    Hexagon::R1, Hexagon::R3
  };
  if (unsigned Reg = State.AllocateReg(RegList1, RegList2, 2)) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  unsigned Offset = State.AllocateStack(8, 8, Hexagon::D2);
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false;
}

static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
                          MVT LocVT, CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {

  if (LocVT == MVT::i1 ||
      LocVT == MVT::i8 ||
      LocVT == MVT::i16) {
    LocVT = MVT::i32;
    ValVT = MVT::i32;
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
    else
      LocInfo = CCValAssign::AExt;
  }

  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    if (!RetCC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false;
  }

  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    if (!RetCC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false;
  }

  return true;  // CC didn't match.
}

static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
                            MVT LocVT, CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {

  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    if (unsigned Reg = State.AllocateReg(Hexagon::R0)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  unsigned Offset = State.AllocateStack(4, 4);
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false;
}

static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
                            MVT LocVT, CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  unsigned Offset = State.AllocateStack(8, 8);
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false;
}
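
// For illustration: with the return convention above, an i32/f32 result
// (e.g. a C "int") comes back in R0, while an i64/f64 result (e.g.
// "long long" or "double") comes back in the register pair D0 (R1:0).
// Anything larger is returned through memory, as noted on LowerReturn below.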

SDValue
HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
const {
  return SDValue();
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter. Sometimes what we are copying is the end of a
/// larger object, the part that does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          SDLoc dl) {

  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile=*/false, /*AlwaysInline=*/false,
                       MachinePointerInfo(), MachinePointerInfo());
}


// LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
// passed by value, the function prototype is modified to return void and
// the value is stored in memory pointed to by a pointer passed by the caller.
SDValue
HexagonTargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc dl, SelectionDAG &DAG) const {

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze return values of ISD::RET
  CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other,
                     &RetOps[0], RetOps.size());
}
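
// For a simple "return x" of an i32 value, the code above produces, roughly,
// the following node sequence (sketch only):
//
//   Chain = CopyToReg Chain, R0, x, Glue
//   HexagonISD::RET_FLAG Chain, Register:i32 R0, Glue
//
// i.e. the value is pinned to R0 and the glue keeps the copy attached to
// the return node.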

/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. Returns a SDNode with the same number of values as the
/// ISD::CALL.
SDValue
HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                       CallingConv::ID CallConv, bool isVarArg,
                                       const
                                       SmallVectorImpl<ISD::InputArg> &Ins,
                                       SDLoc dl, SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals,
                                       const SmallVectorImpl<SDValue> &OutVals,
                                       SDValue Callee) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;

  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    Chain = DAG.getCopyFromReg(Chain, dl,
                               RVLocs[i].getLocReg(),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

/// LowerCall - Function arguments are copied from virtual registers to
/// (physical regs)/(stack frame); CALLSEQ_START and CALLSEQ_END are emitted.
SDValue
HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  // Check for varargs.
  NumNamedVarArgParams = -1;
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const Function* CalleeFn = NULL;
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, MVT::i32);
    if ((CalleeFn = dyn_cast<Function>(GA->getGlobal()))) {
      // If a function has zero args and is a vararg function, that's
      // disallowed, so it must be an undeclared function. Do not assume
      // varargs if the callee is undefined.
      if (CalleeFn->isVarArg() &&
          CalleeFn->getFunctionType()->getNumParams() != 0) {
        NumNamedVarArgParams = CalleeFn->getFunctionType()->getNumParams();
      }
    }
  }

  if (NumNamedVarArgParams > 0)
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_VarArg);
  else
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);

  if (isTailCall) {
    bool StructAttrFlag =
      DAG.getMachineFunction().getFunction()->hasStructRetAttr();
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                                                   isVarArg, IsStructRet,
                                                   StructAttrFlag,
                                                   Outs, OutVals, Ins, DAG);
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
      CCValAssign &VA = ArgLocs[i];
      if (VA.isMemLoc()) {
        isTailCall = false;
        break;
      }
    }
    if (isTailCall) {
      DEBUG(dbgs() << "Eligible for Tail Call\n");
    } else {
      DEBUG(dbgs() <<
            "Argument must be passed on stack. Not eligible for Tail Call\n");
    }
  }
  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
  SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  SDValue StackPtr =
    DAG.getCopyFromReg(Chain, dl, TM.getRegisterInfo()->getStackRegister(),
                       getPointerTy());

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default:
        // Loc info must be one of Full, SExt, ZExt, or AExt.
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    if (VA.isMemLoc()) {
      unsigned LocMemOffset = VA.getLocMemOffset();
      SDValue PtrOff = DAG.getConstant(LocMemOffset, StackPtr.getValueType());
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);

      if (Flags.isByVal()) {
        // The argument is a struct passed by value. According to LLVM, "Arg"
        // is a pointer.
        MemOpChains.push_back(CreateCopyOfByValArgument(Arg, PtrOff, Chain,
                                                        Flags, DAG, dl));
      } else {
        // The argument is not passed by value. "Arg" is a builtin type. It is
        // not a pointer.
        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                           MachinePointerInfo(), false, false,
                                           0));
      }
      continue;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    }
  }

  // Transform all store nodes into one single node because all store
  // nodes are independent of each other.
  if (!MemOpChains.empty()) {
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOpChains[0],
                        MemOpChains.size());
  }

  if (!isTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
                                                        getPointerTy(), true),
                                 dl);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  if (!isTailCall) {
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
  }

  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.
    //
    // Do not flag preceding copytoreg stuff together with the following stuff.
    InFlag = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
    InFlag = SDValue();
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (flag_aligned_memcpy) {
    const char *MemcpyName =
      "__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes";
    Callee =
      DAG.getTargetExternalSymbol(MemcpyName, getPointerTy());
    flag_aligned_memcpy = false;
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy());
  } else if (ExternalSymbolSDNode *S =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));
  }

  if (InFlag.getNode()) {
    Ops.push_back(InFlag);
  }

  if (isTailCall)
    return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());

  Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, OutVals, Callee);
}

static bool getIndexedAddressParts(SDNode *Ptr, EVT VT,
                                   bool isSEXTLoad, SDValue &Base,
                                   SDValue &Offset, bool &isInc,
                                   SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD)
    return false;

  if (VT == MVT::i64 || VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    // Ensure that Offset is a constant.
    return (isa<ConstantSDNode>(Offset));
  }

  return false;
}

// TODO: Put this function along with the other isS* functions in
// HexagonISelDAGToDAG.cpp into a common file. Or better still, use the
// functions defined in HexagonOperands.td.
static bool Is_PostInc_S4_Offset(SDNode *S, int ShiftAmount) {
  ConstantSDNode *N = cast<ConstantSDNode>(S);

  // immS4 predicate - True if the immediate fits in a 4-bit sign-extended
  // field.
  int64_t v = (int64_t)N->getSExtValue();
  int64_t m = 0;
  if (ShiftAmount > 0) {
    m = v % ShiftAmount;
    v = v >> ShiftAmount;
  }
  return (v <= 7) && (v >= -8) && (m == 0);
}
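
// Worked example (illustration only): for a 32-bit access ShiftAmount is
// 32 / 16 = 2, so a candidate offset of 12 gives m = 12 % 2 = 0 and
// v = 12 >> 2 = 3, which lies in the signed 4-bit range [-8, 7] and is
// therefore accepted as a post-increment amount.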

/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                       SDValue &Base,
                                                       SDValue &Offset,
                                                       ISD::MemIndexedMode &AM,
                                                       SelectionDAG &DAG) const
{
  EVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false;

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    if (ST->getValue().getValueType() == MVT::i64 && ST->isTruncatingStore()) {
      return false;
    }
  } else {
    return false;
  }

  bool isInc = false;
  bool isLegal = getIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                        isInc, DAG);
  // ShiftAmount = number of left-shifted bits in the Hexagon instruction.
  int ShiftAmount = VT.getSizeInBits() / 16;
  if (isLegal && Is_PostInc_S4_Offset(Offset.getNode(), ShiftAmount)) {
    AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
    return true;
  }

  return false;
}
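
// The net effect (sketch): an i32 load whose pointer is then advanced by
// 4 bytes, i.e. a (load p) paired with (add p, 4), can be selected as a
// single post-incremented load such as "r1 = memw(r0++#4)", with AM
// reported as ISD::POST_INC and Offset holding the constant 4.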

SDValue HexagonTargetLowering::LowerINLINEASM(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  MachineFunction &MF = DAG.getMachineFunction();
  HexagonMachineFunctionInfo *FuncInfo =
    MF.getInfo<HexagonMachineFunctionInfo>();
  switch (Node->getOpcode()) {
    case ISD::INLINEASM: {
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the flag operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        if (FuncInfo->hasClobberLR())
          break;
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
        ++i;  // Skip the ID value.

        switch (InlineAsm::getKind(Flags)) {
          default: llvm_unreachable("Bad flags!");
          case InlineAsm::Kind_RegDef:
          case InlineAsm::Kind_RegUse:
          case InlineAsm::Kind_Imm:
          case InlineAsm::Kind_Clobber:
          case InlineAsm::Kind_Mem: {
            for (; NumVals; --NumVals, ++i) {}
            break;
          }
          case InlineAsm::Kind_RegDefEarlyClobber: {
            for (; NumVals; --NumVals, ++i) {
              unsigned Reg =
                cast<RegisterSDNode>(Node->getOperand(i))->getReg();

              // Check whether it is LR.
              if (Reg == TM.getRegisterInfo()->getRARegister()) {
                FuncInfo->setHasClobberLR(true);
                break;
              }
            }
            break;
          }
        }
      }
    }
  }  // Node->getOpcode
  return Op;
}


//
// Taken from the XCore backend.
//
SDValue HexagonTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  // Mark all jump table targets as address taken.
  const std::vector<MachineJumpTableEntry> &JTE = MJTI->getJumpTables();
  const std::vector<MachineBasicBlock*> &JTBBs = JTE[JTI].MBBs;
  for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
    MachineBasicBlock *MBB = JTBBs[i];
    MBB->setHasAddressTaken();
    // This line is needed to set the hasAddressTaken flag on the BasicBlock
    // object.
    BlockAddress::get(const_cast<BasicBlock *>(MBB->getBasicBlock()));
  }

  SDValue JumpTableBase = DAG.getNode(HexagonISD::WrapperJT, dl,
                                      getPointerTy(), TargetJT);
  SDValue ShiftIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                   DAG.getConstant(2, MVT::i32));
  SDValue JTAddress = DAG.getNode(ISD::ADD, dl, MVT::i32, JumpTableBase,
                                  ShiftIndex);
  SDValue LoadTarget = DAG.getLoad(MVT::i32, dl, Chain, JTAddress,
                                   MachinePointerInfo(), false, false, false,
                                   0);
  return DAG.getNode(HexagonISD::BR_JT, dl, MVT::Other, Chain, LoadTarget);
}


SDValue
HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDLoc dl(Op);

  unsigned SPReg = getStackPointerRegisterToSaveRestore();

  // Get a reference to the stack pointer.
  SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);

  // Subtract the dynamic size from the actual stack size to
  // obtain the new stack size.
  SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);

  //
  // For Hexagon, the outgoing memory arguments area should be on top of the
  // alloca area on the stack i.e., the outgoing memory arguments should be
  // at a lower address than the alloca area. Move the alloca area down the
  // stack by adding back the space reserved for outgoing arguments to SP
  // here.
  //
  // We do not know what the size of the outgoing args is at this point.
  // So, we add a pseudo instruction ADJDYNALLOC that will adjust the
  // stack pointer. We patch this instruction with the correct, known
  // offset in emitPrologue().
  //
  // Use a placeholder immediate (zero) for now. This will be patched up
  // by emitPrologue().
  SDValue ArgAdjust = DAG.getNode(HexagonISD::ADJDYNALLOC, dl,
                                  MVT::i32,
                                  Sub,
                                  DAG.getConstant(0, MVT::i32));

  // The Sub result contains the new stack start address, so it
  // must be placed in the stack pointer register.
  SDValue CopyChain = DAG.getCopyToReg(Chain, dl,
                                       TM.getRegisterInfo()->getStackRegister(),
                                       Sub);

  SDValue Ops[2] = { ArgAdjust, CopyChain };
  return DAG.getMergeValues(Ops, 2, dl);
}

SDValue
HexagonTargetLowering::LowerFormalArguments(SDValue Chain,
                                            CallingConv::ID CallConv,
                                            bool isVarArg,
                                            const
                                            SmallVectorImpl<ISD::InputArg> &Ins,
                                            SDLoc dl, SelectionDAG &DAG,
                                            SmallVectorImpl<SDValue> &InVals)
const {

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  HexagonMachineFunctionInfo *FuncInfo =
    MF.getInfo<HexagonMachineFunctionInfo>();


  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);

  // In LLVM, when a struct larger than 8 bytes is returned by value, the
  // first argument is a pointer to the location on the caller's stack where
  // the return value will be stored. For Hexagon, that address is passed
  // only when the struct is larger than 8 bytes; if the struct fits in
  // 8 bytes, no address is passed to the callee and the callee returns the
  // result directly in R0/R1.

  SmallVector<SDValue, 4> MemOps;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    unsigned ObjSize;
    unsigned StackLocation;
    int FI;

    if ((VA.isRegLoc() && !Flags.isByVal())
        || (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() > 8)) {
      // Arguments passed in registers:
      // 1. int, long long, ptr args that get allocated in register.
      // 2. Large struct that gets a register to put its address in.
      EVT RegVT = VA.getLocVT();
      if (RegVT == MVT::i8 || RegVT == MVT::i16 ||
          RegVT == MVT::i32 || RegVT == MVT::f32) {
        unsigned VReg =
          RegInfo.createVirtualRegister(&Hexagon::IntRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      } else if (RegVT == MVT::i64) {
        unsigned VReg =
          RegInfo.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      } else {
        assert(0);
      }
    } else if (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() <= 8) {
      assert(0 && "ByValSize must be bigger than 8 bytes");
    } else {
      // Sanity check.
      assert(VA.isMemLoc());

      if (Flags.isByVal()) {
        // If it's a byval parameter, then we need to compute the
        // "real" size, not the size of the pointer.
        ObjSize = Flags.getByValSize();
      } else {
        ObjSize = VA.getLocVT().getStoreSizeInBits() >> 3;
      }

      StackLocation = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
      // Create the frame index object for this incoming parameter...
      FI = MFI->CreateFixedObject(ObjSize, StackLocation, true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);

      if (Flags.isByVal()) {
        // If it's a pass-by-value aggregate, then do not dereference the stack
        // location. Instead, we should generate a reference to the stack
        // location.
        InVals.push_back(FIN);
      } else {
        InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                                     MachinePointerInfo(), false, false,
                                     false, 0));
      }
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOps[0],
                        MemOps.size());

  if (isVarArg) {
    // This will point to the next argument passed via stack.
    int FrameIndex = MFI->CreateFixedObject(Hexagon_PointerSize,
                                            HEXAGON_LRFP_SIZE +
                                            CCInfo.getNextStackOffset(),
                                            true);
    FuncInfo->setVarArgsFrameIndex(FrameIndex);
  }

  return Chain;
}

SDValue
HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  // VASTART stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MachineFunction &MF = DAG.getMachineFunction();
  HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr,
                      Op.getOperand(1), MachinePointerInfo(SV), false,
                      false, 0);
}

SDValue
HexagonTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue CC = Op.getOperand(4);
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  SDLoc dl(Op);
  SDNode* OpNode = Op.getNode();
  EVT SVT = OpNode->getValueType(0);

  SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i1, LHS, RHS, CC);
  return DAG.getNode(ISD::SELECT, dl, SVT, Cond, TrueVal, FalseVal);
}
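
// In other words (sketch), a node of the form
//
//   (select_cc LHS, RHS, TrueVal, FalseVal, CC)
//
// is rewritten here as
//
//   (select (setcc LHS, RHS, CC):i1, TrueVal, FalseVal)
//
// and the resulting SETCC/SELECT pair is matched by the normal patterns.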

SDValue
HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  EVT ValTy = Op.getValueType();
  SDLoc dl(Op);
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDValue Res;
  if (CP->isMachineConstantPoolEntry())
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), ValTy,
                                    CP->getAlignment());
  else
    Res = DAG.getTargetConstantPool(CP->getConstVal(), ValTy,
                                    CP->getAlignment());
  return DAG.getNode(HexagonISD::CONST32, dl, ValTy, Res);
}

SDValue
HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(4, MVT::i32);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Return LR, which contains the return address. Mark it an implicit live-in.
  unsigned Reg = MF.addLiveIn(TRI->getRARegister(), getRegClassFor(MVT::i32));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}

SDValue
HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const HexagonRegisterInfo *TRI = TM.getRegisterInfo();
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                         TRI->getFrameRegister(), VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo(),
                            false, false, false, 0);
  return FrameAddr;
}

SDValue HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op,
                                                 SelectionDAG& DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
}


SDValue HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Result;
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
  SDLoc dl(Op);
  Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);

  const HexagonTargetObjectFile &TLOF =
    static_cast<const HexagonTargetObjectFile &>(getObjFileLowering());
  if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
    return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), Result);
  }

  return DAG.getNode(HexagonISD::CONST32, dl, getPointerTy(), Result);
}

SDValue
HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue BA_SD = DAG.getTargetBlockAddress(BA, MVT::i32);
  SDLoc dl(Op);
  return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), BA_SD);
}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
                                             &targetmachine)
  : TargetLowering(targetmachine, new HexagonTargetObjectFile()),
    TM(targetmachine) {

  const HexagonRegisterInfo* QRI = TM.getRegisterInfo();

  // Set up the register classes.
  addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);

  if (QRI->Subtarget.hasV5TOps()) {
    addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
    addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
  }

  addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);

  computeRegisterProperties();

  // Align loop entry
  setPrefLoopAlignment(4);

  // Limits for inline expansion of memcpy/memmove
  MaxStoresPerMemcpy = 6;
  MaxStoresPerMemmove = 6;

  //
  // Library calls for unsupported operations
  //

  setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
  setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");

  setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
  setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");

  setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
  setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");

  setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setLibcallName(RTLIB::SREM_I32, "__hexagon_umodsi3");
  setOperationAction(ISD::SREM, MVT::i32, Expand);

  setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
  setOperationAction(ISD::SREM, MVT::i64, Expand);

  setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
  setOperationAction(ISD::UDIV, MVT::i32, Expand);

  setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
  setOperationAction(ISD::UDIV, MVT::i64, Expand);

  setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
  setOperationAction(ISD::UREM, MVT::i32, Expand);

  setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
  setOperationAction(ISD::FDIV, MVT::f32, Expand);

  setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
  setOperationAction(ISD::FDIV, MVT::f64, Expand);

  setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  setOperationAction(ISD::FSQRT, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);

  if (QRI->Subtarget.hasV5TOps()) {
    // Hexagon V5 Support.
    setOperationAction(ISD::FADD, MVT::f32, Legal);
    setOperationAction(ISD::FADD, MVT::f64, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
    setCondCodeAction(ISD::SETOEQ, MVT::f32, Legal);
    setCondCodeAction(ISD::SETOEQ, MVT::f64, Legal);
    setCondCodeAction(ISD::SETUEQ, MVT::f32, Legal);
    setCondCodeAction(ISD::SETUEQ, MVT::f64, Legal);

    setCondCodeAction(ISD::SETOGE, MVT::f32, Legal);
    setCondCodeAction(ISD::SETOGE, MVT::f64, Legal);
    setCondCodeAction(ISD::SETUGE, MVT::f32, Legal);
    setCondCodeAction(ISD::SETUGE, MVT::f64, Legal);

    setCondCodeAction(ISD::SETOGT, MVT::f32, Legal);
    setCondCodeAction(ISD::SETOGT, MVT::f64, Legal);
    setCondCodeAction(ISD::SETUGT, MVT::f32, Legal);
    setCondCodeAction(ISD::SETUGT, MVT::f64, Legal);

    setCondCodeAction(ISD::SETOLE, MVT::f32, Legal);
    setCondCodeAction(ISD::SETOLE, MVT::f64, Legal);
    setCondCodeAction(ISD::SETOLT, MVT::f32, Legal);
    setCondCodeAction(ISD::SETOLT, MVT::f64, Legal);

    setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
    setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

    setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);

    setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);

    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);

    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);

    setOperationAction(ISD::FABS, MVT::f32, Legal);
    setOperationAction(ISD::FABS, MVT::f64, Expand);

    setOperationAction(ISD::FNEG, MVT::f32, Legal);
    setOperationAction(ISD::FNEG, MVT::f64, Expand);
  } else {

    // Expand fp<->uint.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);

    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");

    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");

    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");

    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");

    setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
    setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");

    setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
    setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");

    setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
    setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");

    setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
    setOperationAction(ISD::FADD, MVT::f64, Expand);

    setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
    setOperationAction(ISD::FADD, MVT::f32, Expand);

    setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
    setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);

    setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
    setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);

    setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
    setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);

    setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
    setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);

    setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
    setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);

    setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
    setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);

    setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
    setCondCodeAction(ISD::SETOGT, MVT::f64, Expand);

    setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
    setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);

    setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
    setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);

    setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
    setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);

    setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
    setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);

    setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
    setCondCodeAction(ISD::SETOLT, MVT::f64, Expand);

    setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
    setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);

    setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
    setOperationAction(ISD::FMUL, MVT::f64, Expand);

    setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
    setOperationAction(ISD::MUL, MVT::f32, Expand);

    setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
    setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);

    setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");

    setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
    setOperationAction(ISD::SUB, MVT::f64, Expand);

    setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
    setOperationAction(ISD::SUB, MVT::f32, Expand);

    setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
    setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);

    setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);

    setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);

    setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);

    setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);

    setOperationAction(ISD::FABS, MVT::f32, Expand);
    setOperationAction(ISD::FABS, MVT::f64, Expand);
    setOperationAction(ISD::FNEG, MVT::f32, Expand);
    setOperationAction(ISD::FNEG, MVT::f64, Expand);
  }
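
  // Illustration (sketch only): with the Expand actions and libcall names
  // set in the pre-V5 branch above, an f32 addition such as
  //
  //   %sum = fadd float %a, %b
  //
  // is not matched to an instruction; legalization emits a call to
  // __hexagon_addsf3, with the operands passed per CC_Hexagon32 (R0, R1)
  // and the result returned in R0.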

  setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
  setOperationAction(ISD::SREM, MVT::i32, Expand);

  setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::POST_INC, MVT::i64, Legal);

  setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i64, Legal);

  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Turn FP extload into load/fextend.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  // Hexagon has a i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Expand);
  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // Custom legalize GlobalAddress nodes into CONST32.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  // Truncate action?
  setOperationAction(ISD::TRUNCATE, MVT::i64, Expand);

  // Hexagon doesn't have sext_inreg, replace them with shl/sra.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Hexagon has no REM or DIVREM operations.
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  // Lower SELECT_CC to SETCC and SELECT.
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);

  if (QRI->Subtarget.hasV5TOps()) {

    // We need to mark SELECT as Custom so that we don't go into the
    // infinite select -> setcc -> select_cc -> select loop.
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);

    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);

  } else {

    // Hexagon has no select or setcc: expand to SELECT_CC.
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);

    // This is a workaround documented in DAGCombiner.cpp:2892: we don't
    // support SELECT_CC on every type.
    setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);

  }

  if (EmitJumpTables) {
    setOperationAction(ISD::BR_JT, MVT::Other, Custom);
  } else {
    setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  }
  // Increase jump tables cutover to 5, was 4.
  setMinimumJumpTableEntries(5);

  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

  // In V4, we have double word add/sub with carry. The problem with
  // modelling this instruction is that it produces 2 results - Rdd and Px.
  // To model the update of Px, we would have to use Defs[p0..p3], which would
  // cause any predicate live range to spill. So, we pretend we don't
  // have these instructions.
  setOperationAction(ISD::ADDE, MVT::i8, Expand);
  setOperationAction(ISD::ADDE, MVT::i16, Expand);
  setOperationAction(ISD::ADDE, MVT::i32, Expand);
  setOperationAction(ISD::ADDE, MVT::i64, Expand);
  setOperationAction(ISD::SUBE, MVT::i8, Expand);
  setOperationAction(ISD::SUBE, MVT::i16, Expand);
  setOperationAction(ISD::SUBE, MVT::i32, Expand);
  setOperationAction(ISD::SUBE, MVT::i64, Expand);
  setOperationAction(ISD::ADDC, MVT::i8, Expand);
  setOperationAction(ISD::ADDC, MVT::i16, Expand);
  setOperationAction(ISD::ADDC, MVT::i32, Expand);
  setOperationAction(ISD::ADDC, MVT::i64, Expand);
  setOperationAction(ISD::SUBC, MVT::i8, Expand);
  setOperationAction(ISD::SUBC, MVT::i16, Expand);
  setOperationAction(ISD::SUBC, MVT::i32, Expand);
  setOperationAction(ISD::SUBC, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);

  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);

  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  if (TM.getSubtargetImpl()->isSubtargetV2()) {
    setExceptionPointerRegister(Hexagon::R20);
    setExceptionSelectorRegister(Hexagon::R21);
  } else {
    setExceptionPointerRegister(Hexagon::R0);
    setExceptionSelectorRegister(Hexagon::R1);
  }

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);


  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::INLINEASM, MVT::Other, Custom);

  setMinFunctionAlignment(2);

  // Needed for DYNAMIC_STACKALLOC expansion.
  unsigned StackRegister = TM.getRegisterInfo()->getStackRegister();
  setStackPointerRegisterToSaveRestore(StackRegister);
  setSchedulingPreference(Sched::VLIW);
}


const char*
HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
    default: return 0;
    case HexagonISD::CONST32: return "HexagonISD::CONST32";
    case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP";
    case HexagonISD::CONST32_Int_Real: return "HexagonISD::CONST32_Int_Real";
    case HexagonISD::ADJDYNALLOC: return "HexagonISD::ADJDYNALLOC";
    case HexagonISD::CMPICC: return "HexagonISD::CMPICC";
    case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC";
    case HexagonISD::BRICC: return "HexagonISD::BRICC";
    case HexagonISD::BRFCC: return "HexagonISD::BRFCC";
    case HexagonISD::SELECT_ICC: return "HexagonISD::SELECT_ICC";
    case HexagonISD::SELECT_FCC: return "HexagonISD::SELECT_FCC";
    case HexagonISD::Hi: return "HexagonISD::Hi";
    case HexagonISD::Lo: return "HexagonISD::Lo";
    case HexagonISD::FTOI: return "HexagonISD::FTOI";
    case HexagonISD::ITOF: return "HexagonISD::ITOF";
    case HexagonISD::CALL: return "HexagonISD::CALL";
    case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
    case HexagonISD::BR_JT: return "HexagonISD::BR_JT";
    case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
    case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN";
  }
}

bool
HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  EVT MTy1 = EVT::getEVT(Ty1);
  EVT MTy2 = EVT::getEVT(Ty2);
  if (!MTy1.isSimple() || !MTy2.isSimple()) {
    return false;
  }
  return ((MTy1.getSimpleVT() == MVT::i64) && (MTy2.getSimpleVT() == MVT::i32));
}

bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isSimple() || !VT2.isSimple()) {
    return false;
  }
  return ((VT1.getSimpleVT() == MVT::i64) && (VT2.getSimpleVT() == MVT::i32));
}

bool
HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  // Assuming the caller does not have either a signext or zeroext modifier, and
  // only one value is accepted, any reasonable truncation is allowed.
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  // FIXME: in principle up to 64-bit could be made safe, but it would be very
  // fragile at the moment: any support for multiple value returns would be
  // liable to disallow tail calls involving i64 -> iN truncation in many cases.
  return Ty1->getPrimitiveSizeInBits() <= 32;
}

SDValue
HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  // Mark function as containing a call to EH_RETURN.
  HexagonMachineFunctionInfo *FuncInfo =
    DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>();
  FuncInfo->setHasEHReturn();

  unsigned OffsetReg = Hexagon::R28;

  SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                  DAG.getRegister(Hexagon::R30, getPointerTy()),
                                  DAG.getIntPtrConstant(4));
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
                       false, false, 0);
  Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);

  // Not needed; we already use it as an explicit input to EH_RETURN.
  // MF.getRegInfo().addLiveOut(OffsetReg);

  return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
}

SDValue
HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  // Frame & Return address. Currently unimplemented.
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::GlobalTLSAddress:
    llvm_unreachable("TLS not implemented for Hexagon.");
  case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
  case ISD::GlobalAddress:      return LowerGLOBALADDRESS(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::SELECT:             return Op;
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INLINEASM:          return LowerINLINEASM(Op, DAG);
  }
}

//===----------------------------------------------------------------------===//
// Hexagon Scheduler Hooks
//===----------------------------------------------------------------------===//
MachineBasicBlock *
HexagonTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                   MachineBasicBlock *BB)
  const {
  switch (MI->getOpcode()) {
  case Hexagon::ADJDYNALLOC: {
    MachineFunction *MF = BB->getParent();
    HexagonMachineFunctionInfo *FuncInfo =
      MF->getInfo<HexagonMachineFunctionInfo>();
    FuncInfo->addAllocaAdjustInst(MI);
    return BB;
  }
  default: llvm_unreachable("Unexpected instr type to insert");
  } // switch
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass*>
HexagonTargetLowering::getRegForInlineAsmConstraint(
    const std::string &Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':   // R0-R31
      switch (VT.SimpleTy) {
      default:
        llvm_unreachable("getRegForInlineAsmConstraint Unhandled data type");
      case MVT::i32:
      case MVT::i16:
      case MVT::i8:
      case MVT::f32:
        return std::make_pair(0U, &Hexagon::IntRegsRegClass);
      case MVT::i64:
      case MVT::f64:
        return std::make_pair(0U, &Hexagon::DoubleRegsRegClass);
      }
    default:
      llvm_unreachable("Unknown asm register class");
    }
  }

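  // Anything not handled above is passed on to the generic implementation.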
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  const HexagonRegisterInfo* QRI = TM.getRegisterInfo();
  return QRI->Subtarget.hasV5TOps();
}

/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
bool HexagonTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // Allow a sign-extended 11-bit immediate field; the +/-(1 << 13) bound below
  // accounts for the immediate being scaled by the access size.
  if (AM.BaseOffs <= -(1LL << 13) || AM.BaseOffs >= (1LL << 13)-1) {
    return false;
  }

  // No global is ever allowed as a base.
  if (AM.BaseGV) {
    return false;
  }

  int Scale = AM.Scale;
  if (Scale < 0) Scale = -Scale;
  switch (Scale) {
  case 0:  // No scale reg, "r+i", "r", or just "i".
    break;
  default: // No scaled addressing mode.
    return false;
  }
  return true;
}

/// isLegalICmpImmediate - Return true if the specified immediate is a legal
/// icmp immediate, that is, the target has icmp instructions which can compare
/// a register against the immediate without having to materialize the
/// immediate into a register.
bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return Imm >= -512 && Imm <= 511;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
                                 SDValue Callee,
                                 CallingConv::ID CalleeCC,
                                 bool isVarArg,
                                 bool isCalleeStructRet,
                                 bool isCallerStructRet,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SmallVectorImpl<ISD::InputArg> &Ins,
                                 SelectionDAG& DAG) const {
  const Function *CallerF = DAG.getMachineFunction().getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // ***************************************************************************
  //  Look for obvious safe cases to perform tail call optimization that do not
  //  require ABI changes.
  // ***************************************************************************

  // If this is a tail call via a function pointer, then don't do it!
  if (!(dyn_cast<GlobalAddressSDNode>(Callee))
      && !(dyn_cast<ExternalSymbolSDNode>(Callee))) {
    return false;
  }

  // Do not optimize if the calling conventions do not match.
  if (!CCMatch)
    return false;

  // Do not tail call optimize vararg calls.
  if (isVarArg)
    return false;

  // Also avoid tail call optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // In addition to the cases above, we also disable tail call optimization if
  // the calling convention requires that at least one outgoing argument be
  // passed on the stack. We cannot check that here because at this point that
  // information is not available.
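  // All of the checks above passed, so report the call as a tail-call
  // candidate; the stack-argument restriction mentioned above is expected to
  // be enforced later, once argument lowering has happened.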
  return true;
}