XCoreISelLowering.cpp revision 1d0be15f89cb5056e20e2d24faa8d6afb1573bca
1//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the XCoreTargetLowering class. 11// 12//===----------------------------------------------------------------------===// 13 14#define DEBUG_TYPE "xcore-lower" 15 16#include "XCoreISelLowering.h" 17#include "XCoreMachineFunctionInfo.h" 18#include "XCore.h" 19#include "XCoreTargetObjectFile.h" 20#include "XCoreTargetMachine.h" 21#include "XCoreSubtarget.h" 22#include "llvm/DerivedTypes.h" 23#include "llvm/Function.h" 24#include "llvm/Intrinsics.h" 25#include "llvm/CallingConv.h" 26#include "llvm/GlobalVariable.h" 27#include "llvm/GlobalAlias.h" 28#include "llvm/CodeGen/CallingConvLower.h" 29#include "llvm/CodeGen/MachineFrameInfo.h" 30#include "llvm/CodeGen/MachineFunction.h" 31#include "llvm/CodeGen/MachineInstrBuilder.h" 32#include "llvm/CodeGen/MachineRegisterInfo.h" 33#include "llvm/CodeGen/SelectionDAGISel.h" 34#include "llvm/CodeGen/ValueTypes.h" 35#include "llvm/Support/Debug.h" 36#include "llvm/Support/ErrorHandling.h" 37#include "llvm/Support/raw_ostream.h" 38#include "llvm/ADT/VectorExtras.h" 39#include <queue> 40#include <set> 41using namespace llvm; 42 43const char *XCoreTargetLowering:: 44getTargetNodeName(unsigned Opcode) const 45{ 46 switch (Opcode) 47 { 48 case XCoreISD::BL : return "XCoreISD::BL"; 49 case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper"; 50 case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper"; 51 case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper"; 52 case XCoreISD::STWSP : return "XCoreISD::STWSP"; 53 case XCoreISD::RETSP : return "XCoreISD::RETSP"; 54 default : return NULL; 55 } 56} 57 
/// XCoreTargetLowering - Configure how generic SelectionDAG operations are
/// lowered for the XCore target: the one register class (i32 GRRegs), which
/// node types are Expanded, made Custom, or left Legal, and memcpy/memset
/// expansion limits.
XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
  : TargetLowering(XTM, new XCoreTargetObjectFile()),
    TM(XTM),
    Subtarget(*XTM.getSubtargetImpl()) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, XCore::GRRegsRegisterClass);

  // Compute derived properties from the register classes
  computeRegisterProperties();

  // Division is expensive
  setIntDivIsCheap(false);

  setShiftAmountType(MVT::i32);
  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(SchedulingForRegPressure);

  // Use i32 for setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);

  // XCore does not have the NodeTypes below.
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::ADDC, MVT::i32, Expand);
  setOperationAction(ISD::ADDE, MVT::i32, Expand);
  setOperationAction(ISD::SUBC, MVT::i32, Expand);
  setOperationAction(ISD::SUBE, MVT::i32, Expand);

  // Stop the combiner recombining select and set_cc
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);

  // 64bit
  // On non-XS1A parts, i64 add/sub are custom-lowered to LADD/LSUB chains
  // (see ExpandADDSUB).
  if (!Subtarget.isXS1A()) {
    setOperationAction(ISD::ADD, MVT::i64, Custom);
    setOperationAction(ISD::SUB, MVT::i64, Custom);
  }
  if (Subtarget.isXS1A()) {
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL , MVT::i32, Expand);
  setOperationAction(ISD::ROTR , MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Expand jump tables for now
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);

  // Thread Local Storage
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constantpool nodes
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // Debug
  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);

  maxStoresPerMemset = 4;
  maxStoresPerMemmove = maxStoresPerMemcpy = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
}

/// LowerOperation - Dispatch for operations marked Custom in the
/// constructor above.  Each case forwards to the matching Lower* helper.
SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) {
  switch (Op.getOpcode())
  {
  case ISD::GlobalAddress:    return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ConstantPool:     return LowerConstantPool(Op, DAG);
  case ISD::JumpTable:        return LowerJumpTable(Op, DAG);
  case ISD::LOAD:             return LowerLOAD(Op, DAG);
  case ISD::STORE:            return LowerSTORE(Op, DAG);
  case ISD::SELECT_CC:        return LowerSELECT_CC(Op, DAG);
  case ISD::VAARG:            return LowerVAARG(Op, DAG);
  case ISD::VASTART:          return LowerVASTART(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:              return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:        return LowerFRAMEADDR(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
    return SDValue();
  }
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>&Results,
                                             SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
    return;
  case ISD::ADD:
  case ISD::SUB:
    // i64 add/sub: expanded to an LADD/LSUB pair, see ExpandADDSUB.
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
// Log2 alignment == 1, i.e. all functions are 2-byte aligned.
unsigned XCoreTargetLowering::
getFunctionAlignment(const Function *) const {
  return 1;
}

//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

/// LowerSELECT_CC - Split a select_cc into a separate setcc followed by a
/// select, both of which the target can handle.
SDValue XCoreTargetLowering::
LowerSELECT_CC(SDValue Op, SelectionDAG &DAG)
{
  DebugLoc dl = Op.getDebugLoc();
  SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2),
                             Op.getOperand(3), Op.getOperand(4));
  return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0),
                     Op.getOperand(1));
}

/// getGlobalAddressWrapper - Wrap a target global address in the appropriate
/// XCore addressing node: functions are PC-relative, constant globals are
/// CP-relative (non-XS1A only), everything else is DP-relative.
SDValue XCoreTargetLowering::
getGlobalAddressWrapper(SDValue GA, GlobalValue *GV, SelectionDAG &DAG)
{
  // FIXME there is no actual debug info here
  DebugLoc dl = GA.getDebugLoc();
  if (isa<Function>(GV)) {
    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
  } else if (!Subtarget.isXS1A()) {
    const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
    if (!GVar) {
      // If GV is an alias then use the aliasee to determine constness
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
        GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal());
    }
    bool isConst = GVar && GVar->isConstant();
    if (isConst) {
      return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
    }
  }
  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG)
{
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
  // If it's a debug information descriptor, don't mess with it.
  if (DAG.isVerifiedDebugInfoDesc(Op))
    return GA;
  return getGlobalAddressWrapper(GA, GV, DAG);
}

// Emit an intrinsic call returning the current thread id.
static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) {
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
                     DAG.getConstant(Intrinsic::xcore_getid, MVT::i32));
}

static inline bool isZeroLengthArray(const Type *Ty) {
  const ArrayType *AT = dyn_cast_or_null<ArrayType>(Ty);
  return AT && (AT->getNumElements() == 0);
}

SDValue XCoreTargetLowering::
LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG)
{
  // FIXME there isn't really debug info here
  DebugLoc dl = Op.getDebugLoc();
  // transform to label + getid() * size
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee to determine size
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal());
  }
  if (!GVar) {
    llvm_unreachable("Thread local object not a GlobalVariable?");
    return SDValue();
  }
  const Type *Ty = cast<PointerType>(GV->getType())->getElementType();
  if (!Ty->isSized() || isZeroLengthArray(Ty)) {
#ifndef NDEBUG
    errs() << "Size of thread local object " << GVar->getName()
           << " is unknown\n";
#endif
    llvm_unreachable(0);
  }
  SDValue base = getGlobalAddressWrapper(GA, GV, DAG);
  const TargetData *TD = TM.getTargetData();
  unsigned Size = TD->getTypeAllocSize(Ty);
  SDValue offset = DAG.getNode(ISD::MUL, dl, MVT::i32, BuildGetId(DAG, dl),
                               DAG.getConstant(Size, MVT::i32));
  return DAG.getNode(ISD::ADD, dl, MVT::i32, base, offset);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG)
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  DebugLoc dl = CP->getDebugLoc();
  if (Subtarget.isXS1A()) {
    llvm_unreachable("Lowering of constant pool unimplemented");
    return SDValue();
  } else {
    EVT PtrVT = Op.getValueType();
    SDValue Res;
    if (CP->isMachineConstantPoolEntry()) {
      Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                      CP->getAlignment());
    } else {
      Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                      CP->getAlignment());
    }
    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
  }
}

// Jump tables live in the data area, hence the DP-relative wrapper.
SDValue XCoreTargetLowering::
LowerJumpTable(SDValue Op, SelectionDAG &DAG)
{
  // FIXME there isn't really debug info here
  DebugLoc dl = Op.getDebugLoc();
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, JTI);
}

/// IsWordAlignedBasePlusConstantOffset - Match an address of the form
/// (add base, constant) where base is known word-aligned: a frame index, a
/// dp/cp-relative wrapper, or either of those plus a value shifted left by
/// at least 2 (i.e. a word-scaled index, which preserves word alignment).
static bool
IsWordAlignedBasePlusConstantOffset(SDValue Addr, SDValue &AlignedBase,
                                    int64_t &Offset)
{
  if (Addr.getOpcode() != ISD::ADD) {
    return false;
  }
  ConstantSDNode *CN = 0;
  if (!(CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    return false;
  }
  int64_t off = CN->getSExtValue();
  const SDValue &Base = Addr.getOperand(0);
  const SDValue *Root = &Base;
  if (Base.getOpcode() == ISD::ADD &&
      Base.getOperand(1).getOpcode() == ISD::SHL) {
    ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Base.getOperand(1)
                                                      .getOperand(1));
    if (CN && (CN->getSExtValue() >= 2)) {
      Root = &Base.getOperand(0);
    }
  }
  if (isa<FrameIndexSDNode>(*Root)) {
    // All frame indicies are word aligned
    AlignedBase = Base;
    Offset = off;
    return true;
  }
  if (Root->getOpcode() == XCoreISD::DPRelativeWrapper ||
      Root->getOpcode() == XCoreISD::CPRelativeWrapper) {
    // All dp / cp relative addresses are word aligned
    AlignedBase = Base;
    Offset = off;
    return true;
  }
  return false;
}

/// LowerLOAD - Custom-lower an under-aligned i32 load.  In order of
/// preference: prove word alignment and use a normal load; combine two
/// aligned word loads with shifts; combine two halfword loads; otherwise
/// fall back to a call to __misaligned_load.
SDValue XCoreTargetLowering::
LowerLOAD(SDValue Op, SelectionDAG &DAG)
{
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD && "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsUnalignedMemoryAccesses()) {
    return SDValue();
  }
  unsigned ABIAlignment = getTargetData()->
    getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned load alone.
  if (LD->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  DebugLoc dl = Op.getDebugLoc();

  SDValue Base;
  int64_t Offset;
  if (!LD->isVolatile() &&
      IsWordAlignedBasePlusConstantOffset(BasePtr, Base, Offset)) {
    if (Offset % 4 == 0) {
      // We've managed to infer better alignment information than the load
      // already has. Use an aligned load.
      return DAG.getLoad(getPointerTy(), dl, Chain, BasePtr, NULL, 4);
    }
    // Lower to
    // ldw low, base[offset >> 2]
    // ldw high, base[(offset >> 2) + 1]
    // shr low_shifted, low, (offset & 0x3) * 8
    // shl high_shifted, high, 32 - (offset & 0x3) * 8
    // or result, low_shifted, high_shifted
    SDValue LowOffset = DAG.getConstant(Offset & ~0x3, MVT::i32);
    SDValue HighOffset = DAG.getConstant((Offset & ~0x3) + 4, MVT::i32);
    SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, MVT::i32);
    SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, MVT::i32);

    SDValue LowAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Base, LowOffset);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Base, HighOffset);

    SDValue Low = DAG.getLoad(getPointerTy(), dl, Chain,
                              LowAddr, NULL, 4);
    SDValue High = DAG.getLoad(getPointerTy(), dl, Chain,
                               HighAddr, NULL, 4);
    SDValue LowShifted = DAG.getNode(ISD::SRL, dl, MVT::i32, Low, LowShift);
    SDValue HighShifted = DAG.getNode(ISD::SHL, dl, MVT::i32, High, HighShift);
    SDValue Result = DAG.getNode(ISD::OR, dl, MVT::i32, LowShifted, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, dl);
  }

  if (LD->getAlignment() == 2) {
    // Halfword-aligned: zext-load the low half, ext-load the high half and
    // shift it into place.
    int SVOffset = LD->getSrcValueOffset();
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain,
                                 BasePtr, LD->getSrcValue(), SVOffset, MVT::i16,
                                 LD->isVolatile(), 2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue High = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::i32, Chain,
                                  HighAddr, LD->getSrcValue(), SVOffset + 2,
                                  MVT::i16, LD->isVolatile(), 2);
    SDValue HighShifted = DAG.getNode(ISD::SHL, dl, MVT::i32, High,
                                      DAG.getConstant(16, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, dl, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, dl);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  std::pair<SDValue, SDValue> CallResult =
        LowerCallTo(Chain, IntPtrTy, false, false,
                    false, false, 0, CallingConv::C, false,
                    /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
                    Args, DAG, dl);

  SDValue Ops[] =
    { CallResult.first, CallResult.second };

  return DAG.getMergeValues(Ops, 2, dl);
}

/// LowerSTORE - Custom-lower an under-aligned i32 store: either two
/// halfword truncating stores, or a call to __misaligned_store.
SDValue XCoreTargetLowering::
LowerSTORE(SDValue Op, SelectionDAG &DAG)
{
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsUnalignedMemoryAccesses()) {
    return SDValue();
  }
  unsigned ABIAlignment = getTargetData()->
    getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned store alone.
  if (ST->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  DebugLoc dl = Op.getDebugLoc();

  if (ST->getAlignment() == 2) {
    int SVOffset = ST->getSrcValueOffset();
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
                                         ST->getSrcValue(), SVOffset, MVT::i16,
                                         ST->isVolatile(), 2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
                                          ST->getSrcValue(), SVOffset + 2,
                                          MVT::i16, ST->isVolatile(), 2);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  std::pair<SDValue, SDValue> CallResult =
        LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()), false, false,
                    false, false, 0, CallingConv::C, false,
                    /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
                    Args, DAG, dl);

  return CallResult.second;
}

/// ExpandADDSUB - Expand a 64-bit add/sub into a carry-chained pair of
/// 32-bit LADD/LSUB nodes.
SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG)
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
        "Unknown operand to lower!");
  assert(!Subtarget.isXS1A() && "Cannot custom lower ADD/SUB on xs1a");
  DebugLoc dl = N->getDebugLoc();

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                            N->getOperand(0), DAG.getConstant(0, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                            N->getOperand(0), DAG.getConstant(1, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(0, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(1, MVT::i32));

  // Expand.  LADD/LSUB produce two results: result 0 is the carry/borrow out
  // (used below as carry-in to the high half), result 1 is the sum.
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Carry = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                              LHSL, RHSL, Zero);
  SDValue Lo(Carry.getNode(), 1);

  SDValue Ignored = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                                LHSH, RHSH, Carry);
  SDValue Hi(Ignored.getNode(), 1);
  // Merge the pieces
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

/// LowerVAARG - Currently aborts: the code below it is dead until the FIXME
/// (by-reference arguments need an extra dereference) is resolved.
SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG)
{
  llvm_unreachable("unimplemented");
  // FIX Arguments passed by reference need a extra dereference.
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0),
                               Node->getOperand(1), V, 0);
  // Increment the pointer, VAList, to the next vararg
  SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
                     DAG.getConstant(VT.getSizeInBits(),
                                     getPointerTy()));
  // Store the incremented VAList to the legalized pointer
  Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1), V, 0);
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, dl, Tmp3, VAList, NULL, 0);
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG)
{
  DebugLoc dl = Op.getDebugLoc();
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1), SV, 0);
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

//===----------------------------------------------------------------------===//
//  Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                               unsigned CallConv, bool isVarArg,
                               bool isTailCall,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               DebugLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) {

  // For now, only CallingConv::C implemented
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::Fast:
    case CallingConv::C:
      return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                            Outs, Ins, dl, DAG, InVals);
  }
}

/// LowerCCCCallTo - functions arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
636SDValue 637XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee, 638 unsigned CallConv, bool isVarArg, 639 bool isTailCall, 640 const SmallVectorImpl<ISD::OutputArg> &Outs, 641 const SmallVectorImpl<ISD::InputArg> &Ins, 642 DebugLoc dl, SelectionDAG &DAG, 643 SmallVectorImpl<SDValue> &InVals) { 644 645 // Analyze operands of the call, assigning locations to each operand. 646 SmallVector<CCValAssign, 16> ArgLocs; 647 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), 648 ArgLocs, *DAG.getContext()); 649 650 // The ABI dictates there should be one stack slot available to the callee 651 // on function entry (for saving lr). 652 CCInfo.AllocateStack(4, 4); 653 654 CCInfo.AnalyzeCallOperands(Outs, CC_XCore); 655 656 // Get a count of how many bytes are to be pushed on the stack. 657 unsigned NumBytes = CCInfo.getNextStackOffset(); 658 659 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, 660 getPointerTy(), true)); 661 662 SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass; 663 SmallVector<SDValue, 12> MemOpChains; 664 665 // Walk the register/memloc assignments, inserting copies/loads. 666 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 667 CCValAssign &VA = ArgLocs[i]; 668 SDValue Arg = Outs[i].Val; 669 670 // Promote the value if needed. 
671 switch (VA.getLocInfo()) { 672 default: llvm_unreachable("Unknown loc info!"); 673 case CCValAssign::Full: break; 674 case CCValAssign::SExt: 675 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 676 break; 677 case CCValAssign::ZExt: 678 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 679 break; 680 case CCValAssign::AExt: 681 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 682 break; 683 } 684 685 // Arguments that can be passed on register must be kept at 686 // RegsToPass vector 687 if (VA.isRegLoc()) { 688 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 689 } else { 690 assert(VA.isMemLoc()); 691 692 int Offset = VA.getLocMemOffset(); 693 694 MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other, 695 Chain, Arg, 696 DAG.getConstant(Offset/4, MVT::i32))); 697 } 698 } 699 700 // Transform all store nodes into one single node because 701 // all store nodes are independent of each other. 702 if (!MemOpChains.empty()) 703 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 704 &MemOpChains[0], MemOpChains.size()); 705 706 // Build a sequence of copy-to-reg nodes chained together with token 707 // chain and flag operands which copy the outgoing args into registers. 708 // The InFlag in necessary since all emited instructions must be 709 // stuck together. 710 SDValue InFlag; 711 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 712 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 713 RegsToPass[i].second, InFlag); 714 InFlag = Chain.getValue(1); 715 } 716 717 // If the callee is a GlobalAddress node (quite common, every direct call is) 718 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. 719 // Likewise ExternalSymbol -> TargetExternalSymbol. 
720 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 721 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32); 722 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) 723 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32); 724 725 // XCoreBranchLink = #chain, #target_address, #opt_in_flags... 726 // = Chain, Callee, Reg#1, Reg#2, ... 727 // 728 // Returns a chain & a flag for retval copy to use. 729 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 730 SmallVector<SDValue, 8> Ops; 731 Ops.push_back(Chain); 732 Ops.push_back(Callee); 733 734 // Add argument registers to the end of the list so that they are 735 // known live into the call. 736 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 737 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 738 RegsToPass[i].second.getValueType())); 739 740 if (InFlag.getNode()) 741 Ops.push_back(InFlag); 742 743 Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, &Ops[0], Ops.size()); 744 InFlag = Chain.getValue(1); 745 746 // Create the CALLSEQ_END node. 747 Chain = DAG.getCALLSEQ_END(Chain, 748 DAG.getConstant(NumBytes, getPointerTy(), true), 749 DAG.getConstant(0, getPointerTy(), true), 750 InFlag); 751 InFlag = Chain.getValue(1); 752 753 // Handle result values, copying them out of physregs into vregs that we 754 // return. 755 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 756 Ins, dl, DAG, InVals); 757} 758 759/// LowerCallResult - Lower the result values of a call into the 760/// appropriate copies out of appropriate physical registers. 761SDValue 762XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 763 unsigned CallConv, bool isVarArg, 764 const SmallVectorImpl<ISD::InputArg> &Ins, 765 DebugLoc dl, SelectionDAG &DAG, 766 SmallVectorImpl<SDValue> &InVals) { 767 768 // Assign locations to each value returned by this call. 
769 SmallVector<CCValAssign, 16> RVLocs; 770 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), 771 RVLocs, *DAG.getContext()); 772 773 CCInfo.AnalyzeCallResult(Ins, RetCC_XCore); 774 775 // Copy all of the result registers out of their specified physreg. 776 for (unsigned i = 0; i != RVLocs.size(); ++i) { 777 Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(), 778 RVLocs[i].getValVT(), InFlag).getValue(1); 779 InFlag = Chain.getValue(2); 780 InVals.push_back(Chain.getValue(0)); 781 } 782 783 return Chain; 784} 785 786//===----------------------------------------------------------------------===// 787// Formal Arguments Calling Convention Implementation 788//===----------------------------------------------------------------------===// 789 790/// XCore formal arguments implementation 791SDValue 792XCoreTargetLowering::LowerFormalArguments(SDValue Chain, 793 unsigned CallConv, 794 bool isVarArg, 795 const SmallVectorImpl<ISD::InputArg> &Ins, 796 DebugLoc dl, 797 SelectionDAG &DAG, 798 SmallVectorImpl<SDValue> &InVals) { 799 switch (CallConv) 800 { 801 default: 802 llvm_unreachable("Unsupported calling convention"); 803 case CallingConv::C: 804 case CallingConv::Fast: 805 return LowerCCCArguments(Chain, CallConv, isVarArg, 806 Ins, dl, DAG, InVals); 807 } 808} 809 810/// LowerCCCArguments - transform physical registers into 811/// virtual registers and generate load operations for 812/// arguments places on the stack. 813/// TODO: sret 814SDValue 815XCoreTargetLowering::LowerCCCArguments(SDValue Chain, 816 unsigned CallConv, 817 bool isVarArg, 818 const SmallVectorImpl<ISD::InputArg> 819 &Ins, 820 DebugLoc dl, 821 SelectionDAG &DAG, 822 SmallVectorImpl<SDValue> &InVals) { 823 MachineFunction &MF = DAG.getMachineFunction(); 824 MachineFrameInfo *MFI = MF.getFrameInfo(); 825 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 826 827 // Assign locations to all of the incoming arguments. 
828 SmallVector<CCValAssign, 16> ArgLocs; 829 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), 830 ArgLocs, *DAG.getContext()); 831 832 CCInfo.AnalyzeFormalArguments(Ins, CC_XCore); 833 834 unsigned StackSlotSize = XCoreFrameInfo::stackSlotSize(); 835 836 unsigned LRSaveSize = StackSlotSize; 837 838 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 839 840 CCValAssign &VA = ArgLocs[i]; 841 842 if (VA.isRegLoc()) { 843 // Arguments passed in registers 844 EVT RegVT = VA.getLocVT(); 845 switch (RegVT.getSimpleVT().SimpleTy) { 846 default: 847 { 848#ifndef NDEBUG 849 errs() << "LowerFormalArguments Unhandled argument type: " 850 << RegVT.getSimpleVT().SimpleTy << "\n"; 851#endif 852 llvm_unreachable(0); 853 } 854 case MVT::i32: 855 unsigned VReg = RegInfo.createVirtualRegister( 856 XCore::GRRegsRegisterClass); 857 RegInfo.addLiveIn(VA.getLocReg(), VReg); 858 InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT)); 859 } 860 } else { 861 // sanity check 862 assert(VA.isMemLoc()); 863 // Load the argument to a virtual register 864 unsigned ObjSize = VA.getLocVT().getSizeInBits()/8; 865 if (ObjSize > StackSlotSize) { 866 errs() << "LowerFormalArguments Unhandled argument type: " 867 << (unsigned)VA.getLocVT().getSimpleVT().SimpleTy 868 << "\n"; 869 } 870 // Create the frame index object for this incoming parameter... 
871 int FI = MFI->CreateFixedObject(ObjSize, 872 LRSaveSize + VA.getLocMemOffset()); 873 874 // Create the SelectionDAG nodes corresponding to a load 875 //from this parameter 876 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); 877 InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN, NULL, 0)); 878 } 879 } 880 881 if (isVarArg) { 882 /* Argument registers */ 883 static const unsigned ArgRegs[] = { 884 XCore::R0, XCore::R1, XCore::R2, XCore::R3 885 }; 886 XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>(); 887 unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs, 888 array_lengthof(ArgRegs)); 889 if (FirstVAReg < array_lengthof(ArgRegs)) { 890 SmallVector<SDValue, 4> MemOps; 891 int offset = 0; 892 // Save remaining registers, storing higher register numbers at a higher 893 // address 894 for (unsigned i = array_lengthof(ArgRegs) - 1; i >= FirstVAReg; --i) { 895 // Create a stack slot 896 int FI = MFI->CreateFixedObject(4, offset); 897 if (i == FirstVAReg) { 898 XFI->setVarArgsFrameIndex(FI); 899 } 900 offset -= StackSlotSize; 901 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); 902 // Move argument from phys reg -> virt reg 903 unsigned VReg = RegInfo.createVirtualRegister( 904 XCore::GRRegsRegisterClass); 905 RegInfo.addLiveIn(ArgRegs[i], VReg); 906 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 907 // Move argument from virt reg -> stack 908 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0); 909 MemOps.push_back(Store); 910 } 911 if (!MemOps.empty()) 912 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 913 &MemOps[0], MemOps.size()); 914 } else { 915 // This will point to the next argument passed via stack. 
      XFI->setVarArgsFrameIndex(
        MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset()));
    }
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// LowerReturn - Lower an IR return: copy each return value into the
/// location assigned by RetCC_XCore (registers only), then emit an
/// XCoreISD::RETSP node, which always corresponds to "retsp 0".
SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain,
                                 unsigned CallConv, bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 DebugLoc dl, SelectionDAG &DAG) {

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  // If this is the first return lowered for this function, add
  // the regs to the liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             Outs[i].Val, Flag);

    // Glue each copy to the next so nothing can be scheduled between the
    // copies and the return.
    Flag = Chain.getValue(1);
  }

  // Return on XCore is always a "retsp 0"
  if (Flag.getNode())
    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                       Chain, DAG.getConstant(0, MVT::i32), Flag);
  else // Return Void
    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                       Chain, DAG.getConstant(0, MVT::i32));
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

/// EmitInstrWithCustomInserter - Expand the SELECT_CC pseudo instruction
/// into an explicit diamond of basic blocks: a conditional branch, a
/// fallthrough block for the false value, and a sink block with a PHI.
MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  // SELECT_CC is the only pseudo this target expands here.
  assert((MI->getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  // thisMBB:
  // ...
  //  TrueVal = ...
  //  cmpTY ccX, r1, r2
  //  bCC copy1MBB
  //  fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  // Conditional branch on the condition register (operand 1) straight to
  // sinkMBB; the false path falls through to copy0MBB.
  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi node for the select.
  sinkMBB->transferSuccessors(BB);
  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // copy0MBB:
  //  %FalseValue = ...
  //  # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //  %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  // Operand 2 is the true value (reaching here directly from thisMBB),
  // operand 3 the false value (via copy0MBB).
  BuildMI(BB, dl, TII.get(XCore::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  F->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

/// PerformDAGCombine - Target-specific DAG combining.  The only combine
/// implemented is rewriting an unaligned store of an unaligned load
/// (before legalization) into a single memmove.
SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  DebugLoc dl = N->getDebugLoc();
  switch (N->getOpcode()) {
  default: break;
  case ISD::STORE: {
    // Replace unaligned store of unaligned load with memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    // Only applicable before legalization, only when the target cannot do
    // unaligned accesses natively, and never for volatile/indexed stores.
    if (!DCI.isBeforeLegalize() || allowsUnalignedMemoryAccesses() ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    if (StoreBits % 8) {
      // Not a whole number of bytes - cannot be expressed as a memmove.
      break;
    }
    // The store only counts as "unaligned" when its alignment is below the
    // ABI alignment of the stored type.
    unsigned ABIAlignment = getTargetData()->getABITypeAlignment(
        ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
    unsigned Alignment = ST->getAlignment();
    if (Alignment >= ABIAlignment) {
      break;
    }

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      // The load's value must feed only this store, match its size and
      // alignment, and no side-effecting operation may sit between the two
      // on the chain - then the pair is equivalent to one memmove.
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits/8, MVT::i32),
                              Alignment, ST->getSrcValue(),
                              ST->getSrcValueOffset(), LD->getSrcValue(),
                              LD->getSrcValueOffset());
      }
    }
    break;
  }
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//

/// isImmUs - True if val fits the small unsigned immediate range 0..11.
static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

/// isImmUs2 - True if val is twice a legal small immediate (even, 0..22),
/// i.e. encodable as a halfword-scaled offset.
static inline bool isImmUs2(int64_t val)
{
  return (val%2 == 0 && isImmUs(val/2));
}

/// isImmUs4 - True if val is four times a legal small immediate (multiple
/// of 4, 0..44), i.e. encodable as a word-scaled offset.
static inline bool isImmUs4(int64_t val)
{
  return (val%4 == 0 && isImmUs(val/4));
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
1108bool 1109XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM, 1110 const Type *Ty) const { 1111 // Be conservative with void 1112 // FIXME: Can we be more aggressive? 1113 if (Ty->getTypeID() == Type::VoidTyID) 1114 return false; 1115 1116 const TargetData *TD = TM.getTargetData(); 1117 unsigned Size = TD->getTypeAllocSize(Ty); 1118 if (AM.BaseGV) { 1119 return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 && 1120 AM.BaseOffs%4 == 0; 1121 } 1122 1123 switch (Size) { 1124 case 1: 1125 // reg + imm 1126 if (AM.Scale == 0) { 1127 return isImmUs(AM.BaseOffs); 1128 } 1129 // reg + reg 1130 return AM.Scale == 1 && AM.BaseOffs == 0; 1131 case 2: 1132 case 3: 1133 // reg + imm 1134 if (AM.Scale == 0) { 1135 return isImmUs2(AM.BaseOffs); 1136 } 1137 // reg + reg<<1 1138 return AM.Scale == 2 && AM.BaseOffs == 0; 1139 default: 1140 // reg + imm 1141 if (AM.Scale == 0) { 1142 return isImmUs4(AM.BaseOffs); 1143 } 1144 // reg + reg<<2 1145 return AM.Scale == 4 && AM.BaseOffs == 0; 1146 } 1147 1148 return false; 1149} 1150 1151//===----------------------------------------------------------------------===// 1152// XCore Inline Assembly Support 1153//===----------------------------------------------------------------------===// 1154 1155std::vector<unsigned> XCoreTargetLowering:: 1156getRegClassForInlineAsmConstraint(const std::string &Constraint, 1157 EVT VT) const 1158{ 1159 if (Constraint.size() != 1) 1160 return std::vector<unsigned>(); 1161 1162 switch (Constraint[0]) { 1163 default : break; 1164 case 'r': 1165 return make_vector<unsigned>(XCore::R0, XCore::R1, XCore::R2, 1166 XCore::R3, XCore::R4, XCore::R5, 1167 XCore::R6, XCore::R7, XCore::R8, 1168 XCore::R9, XCore::R10, XCore::R11, 0); 1169 break; 1170 } 1171 return std::vector<unsigned>(); 1172} 1173