XCoreISelLowering.cpp revision 3574eca1b02600bac4e625297f4ecf745f4c4f32
//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "xcore-lower"

#include "XCoreISelLowering.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCore.h"
#include "XCoreTargetObjectFile.h"
#include "XCoreTargetMachine.h"
#include "XCoreSubtarget.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/CallingConv.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch (Opcode)
  {
    case XCoreISD::BL                : return "XCoreISD::BL";
    case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
    case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
    case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
    case XCoreISD::STWSP             : return "XCoreISD::STWSP";
    case XCoreISD::RETSP             : return "XCoreISD::RETSP";
    case XCoreISD::LADD              : return "XCoreISD::LADD";
    case XCoreISD::LSUB              : return "XCoreISD::LSUB";
    case XCoreISD::LMUL              : return "XCoreISD::LMUL";
    case XCoreISD::MACCU             : return "XCoreISD::MACCU";
    case XCoreISD::MACCS             : return "XCoreISD::MACCS";
    case XCoreISD::BR_JT             : return "XCoreISD::BR_JT";
    case XCoreISD::BR_JT32           : return "XCoreISD::BR_JT32";
    default                          : return NULL;
  }
}

XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
  : TargetLowering(XTM, new XCoreTargetObjectFile()),
    TM(XTM),
    Subtarget(*XTM.getSubtargetImpl()) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties();

  // Division is expensive
  setIntDivIsCheap(false);

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::RegPressure);

  // Use i32 for setcc operation results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
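  // (In LLVM's legalization terms: Expand rewrites a node using generic
  // target-independent sequences, Custom routes it through LowerOperation
  // below, and Legal keeps it as-is.)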
  setOperationAction(ISD::BR_CC,     MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32,   Custom);
  setOperationAction(ISD::ADDC, MVT::i32, Expand);
  setOperationAction(ISD::ADDE, MVT::i32, Expand);
  setOperationAction(ISD::SUBC, MVT::i32, Expand);
  setOperationAction(ISD::SUBE, MVT::i32, Expand);

  // Stop the combiner recombining select and set_cc
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);

  // 64bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
  setOperationAction(ISD::ROTR,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);

  // Thread Local Storage
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constantpool nodes
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  setLoadExtAction(ISD::EXTLOAD,  MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i8,  Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD,  MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND,   MVT::Other, Expand);
  setOperationAction(ISD::VACOPY,  MVT::Other, Expand);
  setOperationAction(ISD::VAARG,   MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE,   MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  maxStoresPerMemset = maxStoresPerMemsetOptSize = 4;
  maxStoresPerMemmove = maxStoresPerMemmoveOptSize
    = maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);

  setMinFunctionAlignment(1);
}

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::GlobalAddress:     return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:  return LowerGlobalTLSAddress(Op, DAG);
  case ISD::BlockAddress:      return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:      return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:             return LowerBR_JT(Op, DAG);
  case ISD::LOAD:              return LowerLOAD(Op, DAG);
  case ISD::STORE:             return LowerSTORE(Op, DAG);
  case ISD::SELECT_CC:         return LowerSELECT_CC(Op, DAG);
  case ISD::VAARG:             return LowerVAARG(Op, DAG);
  case ISD::VASTART:           return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:         return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:         return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:               return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:         return LowerFRAMEADDR(Op, DAG);
  case ISD::INIT_TRAMPOLINE:   return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}

/// ReplaceNodeResults - Replace the results of a node with an illegal result
/// type with new values built out of custom code.
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>&Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

//===----------------------------------------------------------------------===//
//                       Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::
LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
{
  DebugLoc dl = Op.getDebugLoc();
  SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2),
                             Op.getOperand(3), Op.getOperand(4));
  return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0),
                     Op.getOperand(1));
}

SDValue XCoreTargetLowering::
getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
                        SelectionDAG &DAG) const
{
  // FIXME there is no actual debug info here
  DebugLoc dl = GA.getDebugLoc();
  if (isa<Function>(GV)) {
    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
  }
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee to determine constness
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal());
  }
  bool isConst = GVar && GVar->isConstant();
  if (isConst) {
    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
  }
  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(), MVT::i32);
  return getGlobalAddressWrapper(GA, GV, DAG);
}

static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) {
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
                     DAG.getConstant(Intrinsic::xcore_getid, MVT::i32));
}

static inline bool isZeroLengthArray(Type *Ty) {
  ArrayType *AT = dyn_cast_or_null<ArrayType>(Ty);
  return AT && (AT->getNumElements() == 0);
}

SDValue XCoreTargetLowering::
LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
{
  // FIXME there isn't really debug info here
  DebugLoc dl = Op.getDebugLoc();
  // transform to label + getid() * size
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32);
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee to determine size
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal());
  }
  if (!GVar) {
    llvm_unreachable("Thread local object not a GlobalVariable?");
  }
  Type *Ty = cast<PointerType>(GV->getType())->getElementType();
  if (!Ty->isSized() || isZeroLengthArray(Ty)) {
#ifndef NDEBUG
    errs() << "Size of thread local object " << GVar->getName()
           << " is unknown\n";
#endif
    llvm_unreachable(0);
  }
  SDValue base = getGlobalAddressWrapper(GA, GV, DAG);
  const DataLayout *TD = TM.getDataLayout();
  unsigned Size = TD->getTypeAllocSize(Ty);
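  // Each thread owns its own copy of the object, placed Size bytes apart,
  // so this thread's address is base + getid() * Size.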
  SDValue offset = DAG.getNode(ISD::MUL, dl, MVT::i32, BuildGetId(DAG, dl),
                               DAG.getConstant(Size, MVT::i32));
  return DAG.getNode(ISD::ADD, dl, MVT::i32, base, offset);
}

SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  DebugLoc DL = Op.getDebugLoc();

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy());

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, getPointerTy(), Result);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  DebugLoc dl = CP->getDebugLoc();
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}

unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}

static bool
IsWordAlignedBasePlusConstantOffset(SDValue Addr, SDValue &AlignedBase,
                                    int64_t &Offset)
{
  if (Addr.getOpcode() != ISD::ADD) {
    return false;
  }
  ConstantSDNode *CN = 0;
  if (!(CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    return false;
  }
  int64_t off = CN->getSExtValue();
  const SDValue &Base = Addr.getOperand(0);
  const SDValue *Root = &Base;
  if (Base.getOpcode() == ISD::ADD &&
      Base.getOperand(1).getOpcode() == ISD::SHL) {
    ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Base.getOperand(1)
                                                      .getOperand(1));
    if (CN && (CN->getSExtValue() >= 2)) {
      Root = &Base.getOperand(0);
    }
  }
  if (isa<FrameIndexSDNode>(*Root)) {
    // All frame indices are word aligned
    AlignedBase = Base;
    Offset = off;
    return true;
  }
  if (Root->getOpcode() == XCoreISD::DPRelativeWrapper ||
      Root->getOpcode() == XCoreISD::CPRelativeWrapper) {
    // All dp / cp relative addresses are word aligned
    AlignedBase = Base;
    Offset = off;
    return true;
  }
  // Check for an aligned global variable.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(*Root)) {
    const GlobalValue *GV = GA->getGlobal();
    if (GA->getOffset() == 0 && GV->getAlignment() >= 4) {
      AlignedBase = Base;
      Offset = off;
      return true;
    }
  }
  return false;
}

SDValue XCoreTargetLowering::
LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsUnalignedMemoryAccesses(LD->getMemoryVT()))
    return SDValue();

  unsigned ABIAlignment = getDataLayout()->
    getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned load alone.
  if (LD->getAlignment() >= ABIAlignment)
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  DebugLoc DL = Op.getDebugLoc();

  SDValue Base;
  int64_t Offset;
  if (!LD->isVolatile() &&
      IsWordAlignedBasePlusConstantOffset(BasePtr, Base, Offset)) {
    if (Offset % 4 == 0) {
      // We've managed to infer better alignment information than the load
      // already has. Use an aligned load.
      //
      return DAG.getLoad(getPointerTy(), DL, Chain, BasePtr,
                         MachinePointerInfo(),
                         false, false, false, 0);
    }
    // Lower to
    // ldw low, base[offset >> 2]
    // ldw high, base[(offset >> 2) + 1]
    // shr low_shifted, low, (offset & 0x3) * 8
    // shl high_shifted, high, 32 - (offset & 0x3) * 8
    // or result, low_shifted, high_shifted
    SDValue LowOffset = DAG.getConstant(Offset & ~0x3, MVT::i32);
    SDValue HighOffset = DAG.getConstant((Offset & ~0x3) + 4, MVT::i32);
    SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, MVT::i32);
    SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, MVT::i32);

    SDValue LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, LowOffset);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, HighOffset);

    SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
                              LowAddr, MachinePointerInfo(),
                              false, false, false, 0);
    SDValue High = DAG.getLoad(getPointerTy(), DL, Chain,
                               HighAddr, MachinePointerInfo(),
                               false, false, false, 0);
    SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, DL);
  }

  if (LD->getAlignment() == 2) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
                                 BasePtr, LD->getPointerInfo(), MVT::i16,
                                 LD->isVolatile(), LD->isNonTemporal(), 2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                  HighAddr,
                                  LD->getPointerInfo().getWithOffset(2),
                                  MVT::i16, LD->isVolatile(),
                                  LD->isNonTemporal(), 2);
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(Chain, IntPtrTy, false, false,
                    false, false, 0, CallingConv::C, /*isTailCall=*/false,
                    /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
                    Args, DAG, DL);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  SDValue Ops[] =
    { CallResult.first, CallResult.second };

  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue XCoreTargetLowering::
LowerSTORE(SDValue Op, SelectionDAG &DAG) const
{
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
    return SDValue();
  }
  unsigned ABIAlignment = getDataLayout()->
    getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned store alone.
  if (ST->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  DebugLoc dl = Op.getDebugLoc();

  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
                                         ST->getPointerInfo(), MVT::i16,
                                         ST->isVolatile(), ST->isNonTemporal(),
                                         2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
                                          ST->getPointerInfo().getWithOffset(2),
                                          MVT::i16, ST->isVolatile(),
                                          ST->isNonTemporal(), 2);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(Chain,
                    Type::getVoidTy(*DAG.getContext()), false, false,
                    false, false, 0, CallingConv::C, /*isTailCall=*/false,
                    /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
                    Args, DAG, dl);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  return CallResult.second;
}

SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  DebugLoc dl = Op.getDebugLoc();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  DebugLoc dl = Op.getDebugLoc();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
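/// For example, add(add(mul(x,y),a),b) matches with Mul0 = x, Mul1 = y,
/// Addend0 = a and Addend1 = b.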
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}

SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  DebugLoc dl = N->getDebugLoc();
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD) {
    SDValue Result = TryExpandADDWithMul(N, DAG);
    if (Result.getNode() != 0)
      return Result;
  }

  DebugLoc dl = N->getDebugLoc();

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(0, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(1, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(0, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(1, MVT::i32));

  // Expand
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Carry = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                              LHSL, RHSL, Zero);
  SDValue Lo(Carry.getNode(), 1);

  SDValue Ignored = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                                LHSH, RHSH, Carry);
  SDValue Hi(Ignored.getNode(), 1);
  // Merge the pieces
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  llvm_unreachable("unimplemented");
  // FIXME Arguments passed by reference need an extra dereference.
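  // The code below is currently unreachable; it sketches the usual va_arg
  // expansion (load the va_list pointer, bump it past this argument, store
  // it back, then load the argument itself).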
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0),
                               Node->getOperand(1), MachinePointerInfo(V),
                               false, false, false, 0);
  // Increment the pointer, VAList, to the next vararg
  SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
                             DAG.getConstant(VT.getSizeInBits(),
                                             getPointerTy()));
  // Store the incremented VAList to the legalized pointer
  Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1),
                      MachinePointerInfo(V), false, false, 0);
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
                     false, false, false, 0);
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  DebugLoc dl = Op.getDebugLoc();
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo(), false, false, 0);
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // .align 4
  //    LDAPF_u10 r11, nest
  //    LDW_2rus r11, r11[0]
  //    STWSP_ru6 r11, sp[0]
  //    LDAPF_u10 r11, fptr
  //    LDW_2rus r11, r11[0]
  //    BAU_1r r11
  // nest:
  //    .word nest
  // fptr:
  //    .word fptr
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  DebugLoc dl = Op.getDebugLoc();
  OutChains[0] = DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, MVT::i32));
  OutChains[1] = DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 4), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, MVT::i32));
  OutChains[2] = DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 8), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, MVT::i32));
  OutChains[3] = DAG.getStore(Chain, dl, Nest, Addr,
                              MachinePointerInfo(TrmpAddr, 12), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, MVT::i32));
  OutChains[4] = DAG.getStore(Chain, dl, FPtr, Addr,
                              MachinePointerInfo(TrmpAddr, 16), false, false,
                              0);

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 5);
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//                  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  DebugLoc &dl                          = CLI.DL;
  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
  SmallVector<SDValue, 32> &OutVals     = CLI.OutVals;
  SmallVector<ISD::InputArg, 32> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  bool &isTailCall                      = CLI.IsTailCall;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool isVarArg                         = CLI.IsVarArg;

  // XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C implemented
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::Fast:
    case CallingConv::C:
      return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                            Outs, OutVals, Ins, dl, DAG, InVals);
  }
}

/// LowerCCCCallTo - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
SDValue
XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
                                                      getPointerTy(), true));

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
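    // (CC_XCore picked the location VT; SExt/ZExt/AExt record how the value
    // must be widened to fit it.)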
    switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    // Arguments that can be passed in registers must be kept in the
    // RegsToPass vector
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //             = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, getPointerTy(), true),
                             DAG.getConstant(0, getPointerTy(), true),
                             InFlag);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
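/// Result locations are assigned by RetCC_XCore; the copies are emitted in
/// order, threading the glue value between them.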
SDValue
XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                     CallingConv::ID CallConv, bool isVarArg,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     DebugLoc dl, SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  CCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore formal arguments implementation
SDValue
XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg>
                                            &Ins,
                                          DebugLoc dl,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals)
                                            const {
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::Fast:
      return LowerCCCArguments(Chain, CallConv, isVarArg,
                               Ins, dl, DAG, InVals);
  }
}

/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
SDValue
XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
                                       CallingConv::ID CallConv,
                                       bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg>
                                         &Ins,
                                       DebugLoc dl,
                                       SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getSimpleVT().SimpleTy << "\n";
#endif
          llvm_unreachable(0);
        }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      }
    } else {
      // sanity check
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize,
                                      LRSaveSize + VA.getLocMemOffset(),
                                      true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                                   MachinePointerInfo::getFixedStack(FI),
                                   false, false, false, 0));
    }
  }

  if (isVarArg) {
    /* Argument registers */
    static const uint16_t ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
                                                     array_lengthof(ArgRegs));
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      SmallVector<SDValue, 4> MemOps;
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot
        int FI = MFI->CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        // Move argument from virt reg -> stack
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                     MachinePointerInfo(), false, false, 0);
        MemOps.push_back(Store);
      }
      if (!MemOps.empty())
        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            &MemOps[0], MemOps.size());
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
        MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                               true));
    }
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_XCore);
}

SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 DebugLoc dl, SelectionDAG &DAG) const {

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  // If this is the first return lowered for this function, add
  // the regs to the liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             OutVals[i], Flag);

    // guarantee that all emitted copies are
    // stuck together, avoiding something bad
    Flag = Chain.getValue(1);
  }

  // Return on XCore is always a "retsp 0"
  if (Flag.getNode())
    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                       Chain, DAG.getConstant(0, MVT::i32), Flag);
  else // Return Void
    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                       Chain, DAG.getConstant(0, MVT::i32));
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  assert((MI->getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl,
          TII.get(XCore::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  DebugLoc dl = N->getDebugLoc();
  switch (N->getOpcode()) {
  default: break;
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // canonicalize constant to RHS
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, VT));
      SDValue Ops [] = { Carry, Result };
      return DAG.getMergeValues(Ops, 2, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Carry = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops [] = { Carry, Result };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, VT), N2);
        SDValue Ops [] = { Borrow, Result };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops [] = { Borrow, Result };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS. If both multiplicative
    // operands are constant canonicalize smallest to RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused fold to add(a, b)
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops [] = { Lo, Lo };
        return DAG.getMergeValues(Ops, 2, dl);
      }
      // Otherwise fold to ladd(a, b, 0)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
    }
  }
  break;
  case ISD::ADD: {
    // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
    // before type legalization as it is messy to match the operands after
    // that.
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
  case ISD::STORE: {
    // Replace unaligned store of unaligned load with memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    if (!DCI.isBeforeLegalize() ||
        allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    if (StoreBits % 8) {
      break;
    }
    unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(
        ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
    unsigned Alignment = ST->getAlignment();
    if (Alignment >= ABIAlignment) {
      break;
    }

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits/8, MVT::i32),
                              Alignment, false, ST->getPointerInfo(),
                              LD->getPointerInfo());
      }
    }
    break;
  }
  }
  return SDValue();
}

void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                         APInt &KnownZero,
                                                         APInt &KnownOne,
                                                         const SelectionDAG &DAG,
                                                         unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 0) {
      // Top bits of carry / borrow are clear.
      KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                        KnownZero.getBitWidth() - 1);
    }
    break;
  }
}

//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//

static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val)
{
  return (val%2 == 0 && isImmUs(val/2));
}

static inline bool isImmUs4(int64_t val)
{
  return (val%4 == 0 && isImmUs(val/4));
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
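/// The scaled-immediate forms take offsets of 0-11 units of the access size
/// (see the isImmUs helpers above); otherwise only reg + reg * Scale is
/// accepted, with Scale equal to the access size.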
bool
XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           Type *Ty) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  const DataLayout *TD = TM.getDataLayout();
  unsigned Size = TD->getTypeAllocSize(Ty);
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
           AM.BaseOffs%4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }
}

//===----------------------------------------------------------------------===//
//                           XCore Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass*>
XCoreTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             EVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default : break;
    case 'r':
      return std::make_pair(0U, &XCore::GRRegsRegClass);
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}