XCoreISelLowering.cpp revision 8b99622b9b0902c709a33a07efb3461bc7830852
//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "xcore-lower"

#include "XCoreISelLowering.h"
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
#include "XCoreTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch (Opcode)
  {
    case XCoreISD::BL                : return "XCoreISD::BL";
    case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
    case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
    case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
    case XCoreISD::STWSP             : return "XCoreISD::STWSP";
    case XCoreISD::RETSP             : return "XCoreISD::RETSP";
    case XCoreISD::LADD              : return "XCoreISD::LADD";
    case XCoreISD::LSUB              : return "XCoreISD::LSUB";
    case XCoreISD::LMUL              : return "XCoreISD::LMUL";
    case XCoreISD::MACCU             : return "XCoreISD::MACCU";
    case XCoreISD::MACCS             : return "XCoreISD::MACCS";
    case XCoreISD::CRC8              : return "XCoreISD::CRC8";
    case XCoreISD::BR_JT             : return "XCoreISD::BR_JT";
    case XCoreISD::BR_JT32           : return "XCoreISD::BR_JT32";
    case XCoreISD::MEMBARRIER        : return "XCoreISD::MEMBARRIER";
    default                          : return NULL;
  }
}

XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
  : TargetLowering(XTM, new XCoreTargetObjectFile()),
    TM(XTM),
    Subtarget(*XTM.getSubtargetImpl()) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties();

  // Division is expensive.
  setIntDivIsCheap(false);

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for the results of setcc operations (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
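  // (Operations marked Expand below are legalized with generic expansions;
  // those marked Custom are routed through LowerOperation further down.)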
  setOperationAction(ISD::BR_CC,     MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::ADDC, MVT::i32, Expand);
  setOperationAction(ISD::ADDE, MVT::i32, Expand);
  setOperationAction(ISD::SUBC, MVT::i32, Expand);
  setOperationAction(ISD::SUBE, MVT::i32, Expand);

  // Stop the combiner recombining select and set_cc.
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);

  // 64bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
  setOperationAction(ISD::ROTR,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constantpool nodes.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // Exception handling
  setExceptionPointerRegister(XCore::R0);
  setExceptionSelectorRegister(XCore::R1);

  // Atomic operations
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize
    = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);

  setMinFunctionAlignment(1);
}

bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
    return true;
  }

  return false;
}

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:          return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:          return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:                return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
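/// For XCore this currently means i64 ADD and SUB, which ExpandADDSUB
/// rebuilds from 32-bit LADD/LSUB pieces.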
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>&Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::
LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2),
                             Op.getOperand(3), Op.getOperand(4));
  return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0),
                     Op.getOperand(1));
}

SDValue XCoreTargetLowering::
getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
                        SelectionDAG &DAG) const
{
  // FIXME there is no actual debug info here
  SDLoc dl(GA);
  const GlobalValue *UnderlyingGV = GV;
  // If GV is an alias then use the aliasee to determine the wrapper type.
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
    UnderlyingGV = GA->resolveAliasedGlobal();
  if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(UnderlyingGV)) {
    if (GVar->isConstant())
      return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
    return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
  }
  return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc DL(Op);
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  int64_t Offset = GN->getOffset();
  // We can only fold positive offsets that are a multiple of the word size.
  int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
  SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
  GA = getGlobalAddressWrapper(GA, GV, DAG);
  // Handle the rest of the offset.
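  // (The remainder, Offset - FoldedOffset, is added back with an explicit
  // ADD below; only non-negative word multiples fit in the wrapper.)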
  if (Offset != FoldedOffset) {
    SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, MVT::i32);
    GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
  }
  return GA;
}

SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc DL(Op);

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy());

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, getPointerTy(), Result);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  SDLoc dl(CP);
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}

unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}

SDValue XCoreTargetLowering::
lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
                                       int64_t Offset, SelectionDAG &DAG) const
{
  if ((Offset & 0x3) == 0) {
    return DAG.getLoad(getPointerTy(), DL, Chain, Base, MachinePointerInfo(),
                       false, false, false, 0);
  }
  // Lower to pair of consecutive word aligned loads plus some bit shifting.
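  // For example, for Offset == 6 the word is reassembled as
  //   (load(Base + 4) >> 16) | (load(Base + 8) << 16)
  // since LowOffset == 4, HighOffset == 8 and both shift amounts are 16.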
  int32_t HighOffset = RoundUpToAlignment(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  SDValue LowAddr, HighAddr;
  if (GlobalAddressSDNode *GASD =
        dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
    LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                   LowOffset);
    HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                    HighOffset);
  } else {
    LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                          DAG.getConstant(LowOffset, MVT::i32));
    HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                           DAG.getConstant(HighOffset, MVT::i32));
  }
  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, MVT::i32);
  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, MVT::i32);

  SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
                            LowAddr, MachinePointerInfo(),
                            false, false, false, 0);
  SDValue High = DAG.getLoad(getPointerTy(), DL, Chain,
                             HighAddr, MachinePointerInfo(),
                             false, false, false, 0);
  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                      High.getValue(1));
  SDValue Ops[] = { Result, Chain };
  return DAG.getMergeValues(Ops, 2, DL);
}

static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
  APInt KnownZero, KnownOne;
  DAG.ComputeMaskedBits(Value, KnownZero, KnownOne);
  return KnownZero.countTrailingOnes() >= 2;
}

SDValue XCoreTargetLowering::
LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsUnalignedMemoryAccesses(LD->getMemoryVT()))
    return SDValue();

  unsigned ABIAlignment = getDataLayout()->
    getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned load alone.
  if (LD->getAlignment() >= ABIAlignment)
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  SDLoc DL(Op);

  if (!LD->isVolatile()) {
    const GlobalValue *GV;
    int64_t Offset = 0;
    if (DAG.isBaseWithConstantOffset(BasePtr) &&
        isWordAligned(BasePtr->getOperand(0), DAG)) {
      SDValue NewBasePtr = BasePtr->getOperand(0);
      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
        MinAlign(GV->getAlignment(), 4) == 4) {
      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
                                                BasePtr->getValueType(0));
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
  }

  if (LD->getAlignment() == 2) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
                                 BasePtr, LD->getPointerInfo(), MVT::i16,
                                 LD->isVolatile(), LD->isNonTemporal(), 2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                  HighAddr,
                                  LD->getPointerInfo().getWithOffset(2),
                                  MVT::i16, LD->isVolatile(),
                                  LD->isNonTemporal(), 2);
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(Chain, IntPtrTy, false, false,
    false, false, 0, CallingConv::C, /*isTailCall=*/false,
    /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
    DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
    Args, DAG, DL);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  SDValue Ops[] =
    { CallResult.first, CallResult.second };

  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue XCoreTargetLowering::
LowerSTORE(SDValue Op, SelectionDAG &DAG) const
{
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
    return SDValue();
  }
  unsigned ABIAlignment = getDataLayout()->
    getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned store alone.
  if (ST->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  SDLoc dl(Op);

  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
                                         ST->getPointerInfo(), MVT::i16,
                                         ST->isVolatile(), ST->isNonTemporal(),
                                         2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
                                          ST->getPointerInfo().getWithOffset(2),
                                          MVT::i16, ST->isVolatile(),
                                          ST->isNonTemporal(), 2);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(Chain,
    Type::getVoidTy(*DAG.getContext()), false, false,
    false, false, 0, CallingConv::C, /*isTailCall=*/false,
    /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
    DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
    Args, DAG, dl);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  return CallResult.second;
}

SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
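/// All three associations of the two adds are recognised:
/// add(add(mul(x,y),a),b), add(add(a,mul(x,y)),b) and add(add(a,b),mul(x,y)).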
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}

SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  SDLoc dl(N);
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD) {
    SDValue Result = TryExpandADDWithMul(N, DAG);
    if (Result.getNode() != 0)
      return Result;
  }

  SDLoc dl(N);

  // Extract components.
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(0, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(1, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(0, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(1, MVT::i32));

  // Expand.
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSL, RHSL, Zero);
  SDValue Carry(Lo.getNode(), 1);

  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSH, RHSH, Carry);
  SDValue Ignored(Hi.getNode(), 1);
  // Merge the pieces.
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  // Whilst LLVM does not support aggregate varargs we can ignore
  // the possibility of the ValueType being an implicit byVal vararg.
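  // The va_list is therefore a plain pointer: load it, read the argument it
  // points at, and store back the pointer bumped past that argument.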
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0); // not an aggregate
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);
  SDValue VAList = DAG.getLoad(PtrVT, dl, InChain,
                               VAListPtr, MachinePointerInfo(SV),
                               false, false, false, 0);
  // Increment the pointer, VAList, to the next vararg.
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits() / 8));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
                         MachinePointerInfo(SV), false, false, 0);
  // Load the actual argument out of the pointer VAList.
  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo(),
                     false, false, false, 0);
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo(), false, false, 0);
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // .align 4
  // LDAPF_u10 r11, nest
  // LDW_2rus r11, r11[0]
  // STWSP_ru6 r11, sp[0]
  // LDAPF_u10 r11, fptr
  // LDW_2rus r11, r11[0]
  // BAU_1r r11
  // nest:
  // .word nest
  // fptr:
  // .word fptr
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  SDLoc dl(Op);
  OutChains[0] = DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, MVT::i32));
  OutChains[1] = DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 4), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, MVT::i32));
  OutChains[2] = DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 8), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, MVT::i32));
  OutChains[3] = DAG.getStore(Chain,
                              dl, Nest, Addr,
                              MachinePointerInfo(TrmpAddr, 12), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, MVT::i32));
  OutChains[4] = DAG.getStore(Chain, dl, FPtr, Addr,
                              MachinePointerInfo(TrmpAddr, 16), false, false,
                              0);

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 5);
}

SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
  case Intrinsic::xcore_crc8:
    EVT VT = Op.getValueType();
    SDValue Data =
      DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
                  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
    SDValue Crc(Data.getNode(), 1);
    SDValue Results[] = { Crc, Data };
    return DAG.getMergeValues(Results, 2, DL);
  }
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//                  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  SDLoc &dl                             = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  bool &isTailCall                      = CLI.IsTailCall;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool isVarArg                         = CLI.IsVarArg;

  // XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C is implemented.
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::Fast:
    case CallingConv::C:
      return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                            Outs, OutVals, Ins, dl, DAG, InVals);
  }
}

/// LowerCCCCallTo - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
SDValue
XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
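  // Reserving that slot here biases all memory-argument offsets by one word,
  // matching the LRSaveSize applied when lowering formal arguments below.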
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
                                                      getPointerTy(), true), dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //                 = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, getPointerTy(), true),
                             DAG.getConstant(0, getPointerTy(), true),
                             InFlag, dl);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                     CallingConv::ID CallConv, bool isVarArg,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     SDLoc dl, SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  CCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

namespace {
  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
}

/// XCore formal arguments implementation
SDValue
XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg> &Ins,
                                          SDLoc dl,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals)
                                            const {
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::Fast:
      return LowerCCCArguments(Chain, CallConv, isVarArg,
                               Ins, dl, DAG, InVals);
  }
}

/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
SDValue
XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
                                       CallingConv::ID CallConv,
                                       bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg>
                                         &Ins,
                                       SDLoc dl,
                                       SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // The stages are:
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers.
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getSimpleVT().SimpleTy << "\n";
#endif
          llvm_unreachable(0);
        }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
      }
    } else {
      // Sanity check.
      assert(VA.isMemLoc());
      // Load the argument to a virtual register.
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize,
                                      LRSaveSize + VA.getLocMemOffset(),
                                      true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(FI),
                          false, false, false, 0);
    }
    const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
    ArgData.push_back(ADP);
  }

  // 1b. CopyFromReg vararg registers.
  if (isVarArg) {
    // Argument registers.
    static const uint16_t ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
                                                     array_lengthof(ArgRegs));
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address.
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot.
        int FI = MFI->CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg.
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        // Move argument from virt reg -> stack.
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                     MachinePointerInfo(), false, false, 0);
        MemOps.push_back(Store);
      }
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
        MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                               true));
    }
  }

  // 2. Chain CopyFromReg nodes into a TokenFactor.
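  // (Merging the chains here orders stage 1 before the byVal memcpys below;
  // see the note above about the scheduler clobbering registers.)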
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &CFRegNode[0],
                        CFRegNode.size());

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // pointer.
  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
                                                    ArgDE = ArgData.end();
       ArgDI != ArgDE; ++ArgDI) {
    if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
      unsigned Size = ArgDI->Flags.getByValSize();
      unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI->CreateStackObject(Size, Align, false, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
                                     DAG.getConstant(Size, MVT::i32),
                                     Align, false, false,
                                     MachinePointerInfo(),
                                     MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI->SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOps[0],
                        MemOps.size());
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_XCore);
}

SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 SDLoc dl, SelectionDAG &DAG) const {

  // CCValAssign - represent the assignment of
  // the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Return on XCore is always a "retsp 0".
  RetOps.push_back(DAG.getConstant(0, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together,
    // so the glue chain is not broken up by scheduling.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                     &RetOps[0], RetOps.size());
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  assert((MI->getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  // thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);

  // copy0MBB:
  //  %FalseValue = ...
  //  # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges.
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //  %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl,
          TII.get(XCore::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // canonicalize constant to RHS
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, VT));
      SDValue Ops[] = { Result, Carry };
      return DAG.getMergeValues(Ops, 2, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only
    // the low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Carry = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Carry };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, VT), N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only
    // the low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS. If both multiplicative
    // operands are constant canonicalize smallest to RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused fold to add(a, b).
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops[] = { Lo, Lo };
        return DAG.getMergeValues(Ops, 2, dl);
      }
      // Otherwise fold to ladd(a, b, 0).
      SDValue Result =
        DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
      SDValue Carry(Result.getNode(), 1);
      SDValue Ops[] = { Carry, Result };
      return DAG.getMergeValues(Ops, 2, dl);
    }
  }
  break;
  case ISD::ADD: {
    // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
    // before type legalization as it is messy to match the operands after
    // that.
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
  case ISD::STORE: {
    // Replace unaligned store of unaligned load with memmove.
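    // (memmove rather than memcpy, since the source and destination ranges
    // are not known to be non-overlapping.)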
    StoreSDNode *ST = cast<StoreSDNode>(N);
    if (!DCI.isBeforeLegalize() ||
        allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    if (StoreBits % 8) {
      break;
    }
    unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(
        ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
    unsigned Alignment = ST->getAlignment();
    if (Alignment >= ABIAlignment) {
      break;
    }

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits/8, MVT::i32),
                              Alignment, false, ST->getPointerInfo(),
                              LD->getPointerInfo());
      }
    }
    break;
  }
  }
  return SDValue();
}

void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                         APInt &KnownZero,
                                                         APInt &KnownOne,
                                                         const SelectionDAG &DAG,
                                                         unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 1) {
      // Top bits of carry / borrow are clear.
      KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                        KnownZero.getBitWidth() - 1);
    }
    break;
  }
}

//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//

static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val)
{
  return (val%2 == 0 && isImmUs(val/2));
}

static inline bool isImmUs4(int64_t val)
{
  return (val%4 == 0 && isImmUs(val/4));
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
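/// For example, a word access accepts reg + imm with a word-aligned
/// immediate in [0, 44] (isImmUs4), or scaled reg + reg<<2 addressing;
/// halfword and byte accesses shrink the immediate range and register
/// scale accordingly (isImmUs2 and isImmUs).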
bool
XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           Type *Ty) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  const DataLayout *TD = TM.getDataLayout();
  unsigned Size = TD->getTypeAllocSize(Ty);
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
           AM.BaseOffs%4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }
}

//===----------------------------------------------------------------------===//
//                           XCore Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass*>
XCoreTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default : break;
    case 'r':
      return std::make_pair(0U, &XCore::GRRegsRegClass);
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}