LegalizeDAG.cpp revision 63974b2144c87c962effdc0508c27643c8ad98b6
1//===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the SelectionDAG::Legalize method. 11// 12//===----------------------------------------------------------------------===// 13 14#include "llvm/Analysis/DebugInfo.h" 15#include "llvm/CodeGen/Analysis.h" 16#include "llvm/CodeGen/MachineFunction.h" 17#include "llvm/CodeGen/MachineJumpTableInfo.h" 18#include "llvm/CodeGen/SelectionDAG.h" 19#include "llvm/Target/TargetFrameLowering.h" 20#include "llvm/Target/TargetLowering.h" 21#include "llvm/Target/TargetData.h" 22#include "llvm/Target/TargetMachine.h" 23#include "llvm/CallingConv.h" 24#include "llvm/Constants.h" 25#include "llvm/DerivedTypes.h" 26#include "llvm/LLVMContext.h" 27#include "llvm/Support/Debug.h" 28#include "llvm/Support/ErrorHandling.h" 29#include "llvm/Support/MathExtras.h" 30#include "llvm/Support/raw_ostream.h" 31#include "llvm/ADT/DenseMap.h" 32#include "llvm/ADT/SmallVector.h" 33#include "llvm/ADT/SmallPtrSet.h" 34using namespace llvm; 35 36//===----------------------------------------------------------------------===// 37/// SelectionDAGLegalize - This takes an arbitrary SelectionDAG as input and 38/// hacks on it until the target machine can handle it. This involves 39/// eliminating value sizes the machine cannot handle (promoting small sizes to 40/// large sizes or splitting up large values into small values) as well as 41/// eliminating operations the machine cannot handle. 42/// 43/// This code also does a small amount of optimization and recognition of idioms 44/// as part of its processing. For example, if a target does not support a 45/// 'setcc' instruction efficiently, but does support 'brcc' instruction, this 46/// will attempt merge setcc and brc instructions into brcc's. 47/// 48namespace { 49class SelectionDAGLegalize : public SelectionDAG::DAGUpdateListener { 50 const TargetMachine &TM; 51 const TargetLowering &TLI; 52 SelectionDAG &DAG; 53 54 /// LegalizePosition - The iterator for walking through the node list. 55 SelectionDAG::allnodes_iterator LegalizePosition; 56 57 /// LegalizedNodes - The set of nodes which have already been legalized. 58 SmallPtrSet<SDNode *, 16> LegalizedNodes; 59 60 // Libcall insertion helpers. 61 62public: 63 explicit SelectionDAGLegalize(SelectionDAG &DAG); 64 65 void LegalizeDAG(); 66 67private: 68 /// LegalizeOp - Legalizes the given operation. 69 void LegalizeOp(SDNode *Node); 70 71 SDValue OptimizeFloatStore(StoreSDNode *ST); 72 73 /// PerformInsertVectorEltInMemory - Some target cannot handle a variable 74 /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it 75 /// is necessary to spill the vector being inserted into to memory, perform 76 /// the insert there, and then read the result back. 77 SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, 78 SDValue Idx, DebugLoc dl); 79 SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, 80 SDValue Idx, DebugLoc dl); 81 82 /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which 83 /// performs the same shuffe in terms of order or result bytes, but on a type 84 /// whose vector element type is narrower than the original shuffle type. 85 /// e.g. 
<v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3> 86 SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl, 87 SDValue N1, SDValue N2, 88 SmallVectorImpl<int> &Mask) const; 89 90 void LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, 91 DebugLoc dl); 92 93 SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned); 94 SDValue ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, const SDValue *Ops, 95 unsigned NumOps, bool isSigned, DebugLoc dl); 96 97 std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC, 98 SDNode *Node, bool isSigned); 99 SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32, 100 RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80, 101 RTLIB::Libcall Call_PPCF128); 102 SDValue ExpandIntLibCall(SDNode *Node, bool isSigned, 103 RTLIB::Libcall Call_I8, 104 RTLIB::Libcall Call_I16, 105 RTLIB::Libcall Call_I32, 106 RTLIB::Libcall Call_I64, 107 RTLIB::Libcall Call_I128); 108 void ExpandDivRemLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results); 109 110 SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, DebugLoc dl); 111 SDValue ExpandBUILD_VECTOR(SDNode *Node); 112 SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node); 113 void ExpandDYNAMIC_STACKALLOC(SDNode *Node, 114 SmallVectorImpl<SDValue> &Results); 115 SDValue ExpandFCOPYSIGN(SDNode *Node); 116 SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT, 117 DebugLoc dl); 118 SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned, 119 DebugLoc dl); 120 SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned, 121 DebugLoc dl); 122 123 SDValue ExpandBSWAP(SDValue Op, DebugLoc dl); 124 SDValue ExpandBitCount(unsigned Opc, SDValue Op, DebugLoc dl); 125 126 SDValue ExpandExtractFromVectorThroughStack(SDValue Op); 127 SDValue ExpandInsertToVectorThroughStack(SDValue Op); 128 SDValue ExpandVectorBuildThroughStack(SDNode* Node); 129 130 SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP); 131 132 std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node); 133 134 void ExpandNode(SDNode *Node); 135 void PromoteNode(SDNode *Node); 136 137 void ForgetNode(SDNode *N) { 138 LegalizedNodes.erase(N); 139 if (LegalizePosition == SelectionDAG::allnodes_iterator(N)) 140 ++LegalizePosition; 141 } 142 143public: 144 // DAGUpdateListener implementation. 145 virtual void NodeDeleted(SDNode *N, SDNode *E) { 146 ForgetNode(N); 147 } 148 virtual void NodeUpdated(SDNode *N) {} 149 150 // Node replacement helpers 151 void ReplacedNode(SDNode *N) { 152 if (N->use_empty()) { 153 DAG.RemoveDeadNode(N, this); 154 } else { 155 ForgetNode(N); 156 } 157 } 158 void ReplaceNode(SDNode *Old, SDNode *New) { 159 DAG.ReplaceAllUsesWith(Old, New, this); 160 ReplacedNode(Old); 161 } 162 void ReplaceNode(SDValue Old, SDValue New) { 163 DAG.ReplaceAllUsesWith(Old, New, this); 164 ReplacedNode(Old.getNode()); 165 } 166 void ReplaceNode(SDNode *Old, const SDValue *New) { 167 DAG.ReplaceAllUsesWith(Old, New, this); 168 ReplacedNode(Old); 169 } 170}; 171} 172 173/// ShuffleWithNarrowerEltType - Return a vector shuffle operation which 174/// performs the same shuffe in terms of order or result bytes, but on a type 175/// whose vector element type is narrower than the original shuffle type. 176/// e.g. 
<v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3> 177SDValue 178SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl, 179 SDValue N1, SDValue N2, 180 SmallVectorImpl<int> &Mask) const { 181 unsigned NumMaskElts = VT.getVectorNumElements(); 182 unsigned NumDestElts = NVT.getVectorNumElements(); 183 unsigned NumEltsGrowth = NumDestElts / NumMaskElts; 184 185 assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!"); 186 187 if (NumEltsGrowth == 1) 188 return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]); 189 190 SmallVector<int, 8> NewMask; 191 for (unsigned i = 0; i != NumMaskElts; ++i) { 192 int Idx = Mask[i]; 193 for (unsigned j = 0; j != NumEltsGrowth; ++j) { 194 if (Idx < 0) 195 NewMask.push_back(-1); 196 else 197 NewMask.push_back(Idx * NumEltsGrowth + j); 198 } 199 } 200 assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?"); 201 assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?"); 202 return DAG.getVectorShuffle(NVT, dl, N1, N2, &NewMask[0]); 203} 204 205SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag) 206 : TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()), 207 DAG(dag) { 208} 209 210void SelectionDAGLegalize::LegalizeDAG() { 211 DAG.AssignTopologicalOrder(); 212 213 // Visit all the nodes. We start in topological order, so that we see 214 // nodes with their original operands intact. Legalization can produce 215 // new nodes which may themselves need to be legalized. Iterate until all 216 // nodes have been legalized. 217 for (;;) { 218 bool AnyLegalized = false; 219 for (LegalizePosition = DAG.allnodes_end(); 220 LegalizePosition != DAG.allnodes_begin(); ) { 221 --LegalizePosition; 222 223 SDNode *N = LegalizePosition; 224 if (LegalizedNodes.insert(N)) { 225 AnyLegalized = true; 226 LegalizeOp(N); 227 } 228 } 229 if (!AnyLegalized) 230 break; 231 232 } 233 234 // Remove dead nodes now. 235 DAG.RemoveDeadNodes(); 236} 237 238/// ExpandConstantFP - Expands the ConstantFP node to an integer constant or 239/// a load from the constant pool. 240SDValue 241SelectionDAGLegalize::ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP) { 242 bool Extend = false; 243 DebugLoc dl = CFP->getDebugLoc(); 244 245 // If a FP immediate is precise when represented as a float and if the 246 // target can do an extending load from float to double, we put it into 247 // the constant pool as a float, even if it's is statically typed as a 248 // double. This shrinks FP constants and canonicalizes them for targets where 249 // an FP extending load is the same cost as a normal load (such as on the x87 250 // fp stack or PPC FP unit). 251 EVT VT = CFP->getValueType(0); 252 ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue()); 253 if (!UseCP) { 254 assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion"); 255 return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(), 256 (VT == MVT::f64) ? MVT::i64 : MVT::i32); 257 } 258 259 EVT OrigVT = VT; 260 EVT SVT = VT; 261 while (SVT != MVT::f32) { 262 SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1); 263 if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) && 264 // Only do this if the target has a native EXTLOAD instruction from 265 // smaller type. 
266 TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) && 267 TLI.ShouldShrinkFPConstant(OrigVT)) { 268 Type *SType = SVT.getTypeForEVT(*DAG.getContext()); 269 LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType)); 270 VT = SVT; 271 Extend = true; 272 } 273 } 274 275 SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy()); 276 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 277 if (Extend) { 278 SDValue Result = 279 DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT, 280 DAG.getEntryNode(), 281 CPIdx, MachinePointerInfo::getConstantPool(), 282 VT, false, false, Alignment); 283 return Result; 284 } 285 SDValue Result = 286 DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx, 287 MachinePointerInfo::getConstantPool(), false, false, false, 288 Alignment); 289 return Result; 290} 291 292/// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores. 293static void ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, 294 const TargetLowering &TLI, 295 SelectionDAGLegalize *DAGLegalize) { 296 assert(ST->getAddressingMode() == ISD::UNINDEXED && 297 "unaligned indexed stores not implemented!"); 298 SDValue Chain = ST->getChain(); 299 SDValue Ptr = ST->getBasePtr(); 300 SDValue Val = ST->getValue(); 301 EVT VT = Val.getValueType(); 302 int Alignment = ST->getAlignment(); 303 DebugLoc dl = ST->getDebugLoc(); 304 if (ST->getMemoryVT().isFloatingPoint() || 305 ST->getMemoryVT().isVector()) { 306 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 307 if (TLI.isTypeLegal(intVT)) { 308 // Expand to a bitconvert of the value to the integer type of the 309 // same size, then a (misaligned) int store. 310 // FIXME: Does not handle truncating floating point stores! 311 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val); 312 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(), 313 ST->isVolatile(), ST->isNonTemporal(), Alignment); 314 DAGLegalize->ReplaceNode(SDValue(ST, 0), Result); 315 return; 316 } 317 // Do a (aligned) store to a stack slot, then copy from the stack slot 318 // to the final destination using (unaligned) integer loads and stores. 319 EVT StoredVT = ST->getMemoryVT(); 320 EVT RegVT = 321 TLI.getRegisterType(*DAG.getContext(), 322 EVT::getIntegerVT(*DAG.getContext(), 323 StoredVT.getSizeInBits())); 324 unsigned StoredBytes = StoredVT.getSizeInBits() / 8; 325 unsigned RegBytes = RegVT.getSizeInBits() / 8; 326 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes; 327 328 // Make sure the stack slot is also aligned for the register type. 329 SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT); 330 331 // Perform the original store, only redirected to the stack slot. 332 SDValue Store = DAG.getTruncStore(Chain, dl, 333 Val, StackPtr, MachinePointerInfo(), 334 StoredVT, false, false, 0); 335 SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy()); 336 SmallVector<SDValue, 8> Stores; 337 unsigned Offset = 0; 338 339 // Do all but one copies using the full register width. 340 for (unsigned i = 1; i < NumRegs; i++) { 341 // Load one integer register's worth from the stack slot. 342 SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr, 343 MachinePointerInfo(), 344 false, false, false, 0); 345 // Store it to the final location. Remember the store. 346 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr, 347 ST->getPointerInfo().getWithOffset(Offset), 348 ST->isVolatile(), ST->isNonTemporal(), 349 MinAlign(ST->getAlignment(), Offset))); 350 // Increment the pointers. 
351 Offset += RegBytes; 352 StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr, 353 Increment); 354 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 355 } 356 357 // The last store may be partial. Do a truncating store. On big-endian 358 // machines this requires an extending load from the stack slot to ensure 359 // that the bits are in the right place. 360 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 361 8 * (StoredBytes - Offset)); 362 363 // Load from the stack slot. 364 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr, 365 MachinePointerInfo(), 366 MemVT, false, false, 0); 367 368 Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr, 369 ST->getPointerInfo() 370 .getWithOffset(Offset), 371 MemVT, ST->isVolatile(), 372 ST->isNonTemporal(), 373 MinAlign(ST->getAlignment(), Offset))); 374 // The order of the stores doesn't matter - say it with a TokenFactor. 375 SDValue Result = 376 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0], 377 Stores.size()); 378 DAGLegalize->ReplaceNode(SDValue(ST, 0), Result); 379 return; 380 } 381 assert(ST->getMemoryVT().isInteger() && 382 !ST->getMemoryVT().isVector() && 383 "Unaligned store of unknown type."); 384 // Get the half-size VT 385 EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext()); 386 int NumBits = NewStoredVT.getSizeInBits(); 387 int IncrementSize = NumBits / 8; 388 389 // Divide the stored value in two parts. 390 SDValue ShiftAmount = DAG.getConstant(NumBits, 391 TLI.getShiftAmountTy(Val.getValueType())); 392 SDValue Lo = Val; 393 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount); 394 395 // Store the two parts 396 SDValue Store1, Store2; 397 Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr, 398 ST->getPointerInfo(), NewStoredVT, 399 ST->isVolatile(), ST->isNonTemporal(), Alignment); 400 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 401 DAG.getConstant(IncrementSize, TLI.getPointerTy())); 402 Alignment = MinAlign(Alignment, IncrementSize); 403 Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr, 404 ST->getPointerInfo().getWithOffset(IncrementSize), 405 NewStoredVT, ST->isVolatile(), ST->isNonTemporal(), 406 Alignment); 407 408 SDValue Result = 409 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2); 410 DAGLegalize->ReplaceNode(SDValue(ST, 0), Result); 411} 412 413/// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads. 414static void 415ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, 416 const TargetLowering &TLI, 417 SDValue &ValResult, SDValue &ChainResult) { 418 assert(LD->getAddressingMode() == ISD::UNINDEXED && 419 "unaligned indexed loads not implemented!"); 420 SDValue Chain = LD->getChain(); 421 SDValue Ptr = LD->getBasePtr(); 422 EVT VT = LD->getValueType(0); 423 EVT LoadedVT = LD->getMemoryVT(); 424 DebugLoc dl = LD->getDebugLoc(); 425 if (VT.isFloatingPoint() || VT.isVector()) { 426 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits()); 427 if (TLI.isTypeLegal(intVT)) { 428 // Expand to a (misaligned) integer load of the same size, 429 // then bitconvert to floating point or vector. 
430 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(), 431 LD->isVolatile(), 432 LD->isNonTemporal(), 433 LD->isInvariant(), LD->getAlignment()); 434 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad); 435 if (VT.isFloatingPoint() && LoadedVT != VT) 436 Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result); 437 438 ValResult = Result; 439 ChainResult = Chain; 440 return; 441 } 442 443 // Copy the value to a (aligned) stack slot using (unaligned) integer 444 // loads and stores, then do a (aligned) load from the stack slot. 445 EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT); 446 unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8; 447 unsigned RegBytes = RegVT.getSizeInBits() / 8; 448 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes; 449 450 // Make sure the stack slot is also aligned for the register type. 451 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT); 452 453 SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy()); 454 SmallVector<SDValue, 8> Stores; 455 SDValue StackPtr = StackBase; 456 unsigned Offset = 0; 457 458 // Do all but one copies using the full register width. 459 for (unsigned i = 1; i < NumRegs; i++) { 460 // Load one integer register's worth from the original location. 461 SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr, 462 LD->getPointerInfo().getWithOffset(Offset), 463 LD->isVolatile(), LD->isNonTemporal(), 464 LD->isInvariant(), 465 MinAlign(LD->getAlignment(), Offset)); 466 // Follow the load with a store to the stack slot. Remember the store. 467 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr, 468 MachinePointerInfo(), false, false, 0)); 469 // Increment the pointers. 470 Offset += RegBytes; 471 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 472 StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr, 473 Increment); 474 } 475 476 // The last copy may be partial. Do an extending load. 477 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 478 8 * (LoadedBytes - Offset)); 479 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr, 480 LD->getPointerInfo().getWithOffset(Offset), 481 MemVT, LD->isVolatile(), 482 LD->isNonTemporal(), 483 MinAlign(LD->getAlignment(), Offset)); 484 // Follow the load with a store to the stack slot. Remember the store. 485 // On big-endian machines this requires a truncating store to ensure 486 // that the bits end up in the right place. 487 Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr, 488 MachinePointerInfo(), MemVT, 489 false, false, 0)); 490 491 // The order of the stores doesn't matter - say it with a TokenFactor. 492 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0], 493 Stores.size()); 494 495 // Finally, perform the original load only redirected to the stack slot. 496 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase, 497 MachinePointerInfo(), LoadedVT, false, false, 0); 498 499 // Callers expect a MERGE_VALUES node. 500 ValResult = Load; 501 ChainResult = TF; 502 return; 503 } 504 assert(LoadedVT.isInteger() && !LoadedVT.isVector() && 505 "Unaligned load of unsupported type."); 506 507 // Compute the new VT that is half the size of the old one. This is an 508 // integer MVT. 
509 unsigned NumBits = LoadedVT.getSizeInBits(); 510 EVT NewLoadedVT; 511 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2); 512 NumBits >>= 1; 513 514 unsigned Alignment = LD->getAlignment(); 515 unsigned IncrementSize = NumBits / 8; 516 ISD::LoadExtType HiExtType = LD->getExtensionType(); 517 518 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD. 519 if (HiExtType == ISD::NON_EXTLOAD) 520 HiExtType = ISD::ZEXTLOAD; 521 522 // Load the value in two parts 523 SDValue Lo, Hi; 524 if (TLI.isLittleEndian()) { 525 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(), 526 NewLoadedVT, LD->isVolatile(), 527 LD->isNonTemporal(), Alignment); 528 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 529 DAG.getConstant(IncrementSize, TLI.getPointerTy())); 530 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, 531 LD->getPointerInfo().getWithOffset(IncrementSize), 532 NewLoadedVT, LD->isVolatile(), 533 LD->isNonTemporal(), MinAlign(Alignment,IncrementSize)); 534 } else { 535 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(), 536 NewLoadedVT, LD->isVolatile(), 537 LD->isNonTemporal(), Alignment); 538 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 539 DAG.getConstant(IncrementSize, TLI.getPointerTy())); 540 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, 541 LD->getPointerInfo().getWithOffset(IncrementSize), 542 NewLoadedVT, LD->isVolatile(), 543 LD->isNonTemporal(), MinAlign(Alignment,IncrementSize)); 544 } 545 546 // aggregate the two parts 547 SDValue ShiftAmount = DAG.getConstant(NumBits, 548 TLI.getShiftAmountTy(Hi.getValueType())); 549 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount); 550 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo); 551 552 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 553 Hi.getValue(1)); 554 555 ValResult = Result; 556 ChainResult = TF; 557} 558 559/// PerformInsertVectorEltInMemory - Some target cannot handle a variable 560/// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it 561/// is necessary to spill the vector being inserted into to memory, perform 562/// the insert there, and then read the result back. 563SDValue SelectionDAGLegalize:: 564PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx, 565 DebugLoc dl) { 566 SDValue Tmp1 = Vec; 567 SDValue Tmp2 = Val; 568 SDValue Tmp3 = Idx; 569 570 // If the target doesn't support this, we have to spill the input vector 571 // to a temporary stack slot, update the element, then reload it. This is 572 // badness. We could also load the value into a vector register (either 573 // with a "move to register" or "extload into register" instruction, then 574 // permute it into place, if the idx is a constant and if the idx is 575 // supported by the target. 576 EVT VT = Tmp1.getValueType(); 577 EVT EltVT = VT.getVectorElementType(); 578 EVT IdxVT = Tmp3.getValueType(); 579 EVT PtrVT = TLI.getPointerTy(); 580 SDValue StackPtr = DAG.CreateStackTemporary(VT); 581 582 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 583 584 // Store the vector. 585 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr, 586 MachinePointerInfo::getFixedStack(SPFI), 587 false, false, 0); 588 589 // Truncate or zero extend offset to target pointer type. 590 unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND; 591 Tmp3 = DAG.getNode(CastOpc, dl, PtrVT, Tmp3); 592 // Add the offset to the index. 
593 unsigned EltSize = EltVT.getSizeInBits()/8; 594 Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT)); 595 SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr); 596 // Store the scalar value. 597 Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, MachinePointerInfo(), EltVT, 598 false, false, 0); 599 // Load the updated vector. 600 return DAG.getLoad(VT, dl, Ch, StackPtr, 601 MachinePointerInfo::getFixedStack(SPFI), false, false, 602 false, 0); 603} 604 605 606SDValue SelectionDAGLegalize:: 607ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, DebugLoc dl) { 608 if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) { 609 // SCALAR_TO_VECTOR requires that the type of the value being inserted 610 // match the element type of the vector being created, except for 611 // integers in which case the inserted value can be over width. 612 EVT EltVT = Vec.getValueType().getVectorElementType(); 613 if (Val.getValueType() == EltVT || 614 (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) { 615 SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 616 Vec.getValueType(), Val); 617 618 unsigned NumElts = Vec.getValueType().getVectorNumElements(); 619 // We generate a shuffle of InVec and ScVec, so the shuffle mask 620 // should be 0,1,2,3,4,5... with the appropriate element replaced with 621 // elt 0 of the RHS. 622 SmallVector<int, 8> ShufOps; 623 for (unsigned i = 0; i != NumElts; ++i) 624 ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts); 625 626 return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec, 627 &ShufOps[0]); 628 } 629 } 630 return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl); 631} 632 633SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) { 634 // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr' 635 // FIXME: We shouldn't do this for TargetConstantFP's. 636 // FIXME: move this to the DAG Combiner! Note that we can't regress due 637 // to phase ordering between legalized code and the dag combiner. This 638 // probably means that we need to integrate dag combiner and legalizer 639 // together. 640 // We generally can't do this one for long doubles. 641 SDValue Tmp1 = ST->getChain(); 642 SDValue Tmp2 = ST->getBasePtr(); 643 SDValue Tmp3; 644 unsigned Alignment = ST->getAlignment(); 645 bool isVolatile = ST->isVolatile(); 646 bool isNonTemporal = ST->isNonTemporal(); 647 DebugLoc dl = ST->getDebugLoc(); 648 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) { 649 if (CFP->getValueType(0) == MVT::f32 && 650 TLI.isTypeLegal(MVT::i32)) { 651 Tmp3 = DAG.getConstant(CFP->getValueAPF(). 652 bitcastToAPInt().zextOrTrunc(32), 653 MVT::i32); 654 return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 655 isVolatile, isNonTemporal, Alignment); 656 } 657 658 if (CFP->getValueType(0) == MVT::f64) { 659 // If this target supports 64-bit registers, do a single 64-bit store. 660 if (TLI.isTypeLegal(MVT::i64)) { 661 Tmp3 = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt(). 662 zextOrTrunc(64), MVT::i64); 663 return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 664 isVolatile, isNonTemporal, Alignment); 665 } 666 667 if (TLI.isTypeLegal(MVT::i32) && !ST->isVolatile()) { 668 // Otherwise, if the target supports 32-bit registers, use 2 32-bit 669 // stores. If the target supports neither 32- nor 64-bits, this 670 // xform is certainly not worth it. 
671 const APInt &IntVal =CFP->getValueAPF().bitcastToAPInt(); 672 SDValue Lo = DAG.getConstant(IntVal.trunc(32), MVT::i32); 673 SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32); 674 if (TLI.isBigEndian()) std::swap(Lo, Hi); 675 676 Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getPointerInfo(), isVolatile, 677 isNonTemporal, Alignment); 678 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 679 DAG.getIntPtrConstant(4)); 680 Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2, 681 ST->getPointerInfo().getWithOffset(4), 682 isVolatile, isNonTemporal, MinAlign(Alignment, 4U)); 683 684 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi); 685 } 686 } 687 } 688 return SDValue(0, 0); 689} 690 691/// LegalizeOp - Return a legal replacement for the given operation, with 692/// all legal operands. 693void SelectionDAGLegalize::LegalizeOp(SDNode *Node) { 694 if (Node->getOpcode() == ISD::TargetConstant) // Allow illegal target nodes. 695 return; 696 697 DebugLoc dl = Node->getDebugLoc(); 698 699 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) 700 assert(TLI.getTypeAction(*DAG.getContext(), Node->getValueType(i)) == 701 TargetLowering::TypeLegal && 702 "Unexpected illegal type!"); 703 704 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) 705 assert((TLI.getTypeAction(*DAG.getContext(), 706 Node->getOperand(i).getValueType()) == 707 TargetLowering::TypeLegal || 708 Node->getOperand(i).getOpcode() == ISD::TargetConstant) && 709 "Unexpected illegal type!"); 710 711 SDValue Tmp1, Tmp2, Tmp3, Tmp4; 712 bool isCustom = false; 713 714 // Figure out the correct action; the way to query this varies by opcode 715 TargetLowering::LegalizeAction Action = TargetLowering::Legal; 716 bool SimpleFinishLegalizing = true; 717 switch (Node->getOpcode()) { 718 case ISD::INTRINSIC_W_CHAIN: 719 case ISD::INTRINSIC_WO_CHAIN: 720 case ISD::INTRINSIC_VOID: 721 case ISD::VAARG: 722 case ISD::STACKSAVE: 723 Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other); 724 break; 725 case ISD::SINT_TO_FP: 726 case ISD::UINT_TO_FP: 727 case ISD::EXTRACT_VECTOR_ELT: 728 Action = TLI.getOperationAction(Node->getOpcode(), 729 Node->getOperand(0).getValueType()); 730 break; 731 case ISD::FP_ROUND_INREG: 732 case ISD::SIGN_EXTEND_INREG: { 733 EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT(); 734 Action = TLI.getOperationAction(Node->getOpcode(), InnerType); 735 break; 736 } 737 case ISD::ATOMIC_STORE: { 738 Action = TLI.getOperationAction(Node->getOpcode(), 739 Node->getOperand(2).getValueType()); 740 break; 741 } 742 case ISD::SELECT_CC: 743 case ISD::SETCC: 744 case ISD::BR_CC: { 745 unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 : 746 Node->getOpcode() == ISD::SETCC ? 2 : 1; 747 unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 : 0; 748 EVT OpVT = Node->getOperand(CompareOperand).getValueType(); 749 ISD::CondCode CCCode = 750 cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get(); 751 Action = TLI.getCondCodeAction(CCCode, OpVT); 752 if (Action == TargetLowering::Legal) { 753 if (Node->getOpcode() == ISD::SELECT_CC) 754 Action = TLI.getOperationAction(Node->getOpcode(), 755 Node->getValueType(0)); 756 else 757 Action = TLI.getOperationAction(Node->getOpcode(), OpVT); 758 } 759 break; 760 } 761 case ISD::LOAD: 762 case ISD::STORE: 763 // FIXME: Model these properly. LOAD and STORE are complicated, and 764 // STORE expects the unlegalized operand in some cases. 
765 SimpleFinishLegalizing = false; 766 break; 767 case ISD::CALLSEQ_START: 768 case ISD::CALLSEQ_END: 769 // FIXME: This shouldn't be necessary. These nodes have special properties 770 // dealing with the recursive nature of legalization. Removing this 771 // special case should be done as part of making LegalizeDAG non-recursive. 772 SimpleFinishLegalizing = false; 773 break; 774 case ISD::EXTRACT_ELEMENT: 775 case ISD::FLT_ROUNDS_: 776 case ISD::SADDO: 777 case ISD::SSUBO: 778 case ISD::UADDO: 779 case ISD::USUBO: 780 case ISD::SMULO: 781 case ISD::UMULO: 782 case ISD::FPOWI: 783 case ISD::MERGE_VALUES: 784 case ISD::EH_RETURN: 785 case ISD::FRAME_TO_ARGS_OFFSET: 786 case ISD::EH_SJLJ_SETJMP: 787 case ISD::EH_SJLJ_LONGJMP: 788 // These operations lie about being legal: when they claim to be legal, 789 // they should actually be expanded. 790 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); 791 if (Action == TargetLowering::Legal) 792 Action = TargetLowering::Expand; 793 break; 794 case ISD::INIT_TRAMPOLINE: 795 case ISD::ADJUST_TRAMPOLINE: 796 case ISD::FRAMEADDR: 797 case ISD::RETURNADDR: 798 // These operations lie about being legal: when they claim to be legal, 799 // they should actually be custom-lowered. 800 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); 801 if (Action == TargetLowering::Legal) 802 Action = TargetLowering::Custom; 803 break; 804 default: 805 if (Node->getOpcode() >= ISD::BUILTIN_OP_END) { 806 Action = TargetLowering::Legal; 807 } else { 808 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); 809 } 810 break; 811 } 812 813 if (SimpleFinishLegalizing) { 814 SmallVector<SDValue, 8> Ops; 815 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) 816 Ops.push_back(Node->getOperand(i)); 817 switch (Node->getOpcode()) { 818 default: break; 819 case ISD::SHL: 820 case ISD::SRL: 821 case ISD::SRA: 822 case ISD::ROTL: 823 case ISD::ROTR: 824 // Legalizing shifts/rotates requires adjusting the shift amount 825 // to the appropriate width. 826 if (!Ops[1].getValueType().isVector()) { 827 SDValue SAO = DAG.getShiftAmountOperand(Ops[0].getValueType(), Ops[1]); 828 HandleSDNode Handle(SAO); 829 LegalizeOp(SAO.getNode()); 830 Ops[1] = Handle.getValue(); 831 } 832 break; 833 case ISD::SRL_PARTS: 834 case ISD::SRA_PARTS: 835 case ISD::SHL_PARTS: 836 // Legalizing shifts/rotates requires adjusting the shift amount 837 // to the appropriate width. 838 if (!Ops[2].getValueType().isVector()) { 839 SDValue SAO = DAG.getShiftAmountOperand(Ops[0].getValueType(), Ops[2]); 840 HandleSDNode Handle(SAO); 841 LegalizeOp(SAO.getNode()); 842 Ops[2] = Handle.getValue(); 843 } 844 break; 845 } 846 847 SDNode *NewNode = DAG.UpdateNodeOperands(Node, Ops.data(), Ops.size()); 848 if (NewNode != Node) { 849 DAG.ReplaceAllUsesWith(Node, NewNode, this); 850 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) 851 DAG.TransferDbgValues(SDValue(Node, i), SDValue(NewNode, i)); 852 ReplacedNode(Node); 853 Node = NewNode; 854 } 855 switch (Action) { 856 case TargetLowering::Legal: 857 return; 858 case TargetLowering::Custom: 859 // FIXME: The handling for custom lowering with multiple results is 860 // a complete mess. 
861 Tmp1 = TLI.LowerOperation(SDValue(Node, 0), DAG); 862 if (Tmp1.getNode()) { 863 SmallVector<SDValue, 8> ResultVals; 864 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) { 865 if (e == 1) 866 ResultVals.push_back(Tmp1); 867 else 868 ResultVals.push_back(Tmp1.getValue(i)); 869 } 870 if (Tmp1.getNode() != Node || Tmp1.getResNo() != 0) { 871 DAG.ReplaceAllUsesWith(Node, ResultVals.data(), this); 872 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) 873 DAG.TransferDbgValues(SDValue(Node, i), ResultVals[i]); 874 ReplacedNode(Node); 875 } 876 return; 877 } 878 879 // FALL THROUGH 880 case TargetLowering::Expand: 881 ExpandNode(Node); 882 return; 883 case TargetLowering::Promote: 884 PromoteNode(Node); 885 return; 886 } 887 } 888 889 switch (Node->getOpcode()) { 890 default: 891#ifndef NDEBUG 892 dbgs() << "NODE: "; 893 Node->dump( &DAG); 894 dbgs() << "\n"; 895#endif 896 assert(0 && "Do not know how to legalize this operator!"); 897 898 case ISD::CALLSEQ_START: 899 case ISD::CALLSEQ_END: 900 break; 901 case ISD::LOAD: { 902 LoadSDNode *LD = cast<LoadSDNode>(Node); 903 Tmp1 = LD->getChain(); // Legalize the chain. 904 Tmp2 = LD->getBasePtr(); // Legalize the base pointer. 905 906 ISD::LoadExtType ExtType = LD->getExtensionType(); 907 if (ExtType == ISD::NON_EXTLOAD) { 908 EVT VT = Node->getValueType(0); 909 Tmp3 = SDValue(Node, 0); 910 Tmp4 = SDValue(Node, 1); 911 912 switch (TLI.getOperationAction(Node->getOpcode(), VT)) { 913 default: assert(0 && "This action is not supported yet!"); 914 case TargetLowering::Legal: 915 // If this is an unaligned load and the target doesn't support it, 916 // expand it. 917 if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { 918 Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 919 unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty); 920 if (LD->getAlignment() < ABIAlignment){ 921 ExpandUnalignedLoad(cast<LoadSDNode>(Node), 922 DAG, TLI, Tmp3, Tmp4); 923 } 924 } 925 break; 926 case TargetLowering::Custom: 927 Tmp1 = TLI.LowerOperation(Tmp3, DAG); 928 if (Tmp1.getNode()) { 929 Tmp3 = Tmp1; 930 Tmp4 = Tmp1.getValue(1); 931 } 932 break; 933 case TargetLowering::Promote: { 934 // Only promote a load of vector type to another. 935 assert(VT.isVector() && "Cannot promote this load!"); 936 // Change base type to a different vector type. 937 EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT); 938 939 Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getPointerInfo(), 940 LD->isVolatile(), LD->isNonTemporal(), 941 LD->isInvariant(), LD->getAlignment()); 942 Tmp3 = DAG.getNode(ISD::BITCAST, dl, VT, Tmp1); 943 Tmp4 = Tmp1.getValue(1); 944 break; 945 } 946 } 947 if (Tmp4.getNode() != Node) { 948 assert(Tmp3.getNode() != Node && "Load must be completely replaced"); 949 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp3); 950 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Tmp4); 951 ReplacedNode(Node); 952 } 953 return; 954 } 955 956 EVT SrcVT = LD->getMemoryVT(); 957 unsigned SrcWidth = SrcVT.getSizeInBits(); 958 unsigned Alignment = LD->getAlignment(); 959 bool isVolatile = LD->isVolatile(); 960 bool isNonTemporal = LD->isNonTemporal(); 961 962 if (SrcWidth != SrcVT.getStoreSizeInBits() && 963 // Some targets pretend to have an i1 loading operation, and actually 964 // load an i8. This trick is correct for ZEXTLOAD because the top 7 965 // bits are guaranteed to be zero; it helps the optimizers understand 966 // that these bits are zero. 
It is also useful for EXTLOAD, since it 967 // tells the optimizers that those bits are undefined. It would be 968 // nice to have an effective generic way of getting these benefits... 969 // Until such a way is found, don't insist on promoting i1 here. 970 (SrcVT != MVT::i1 || 971 TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) { 972 // Promote to a byte-sized load if not loading an integral number of 973 // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24. 974 unsigned NewWidth = SrcVT.getStoreSizeInBits(); 975 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth); 976 SDValue Ch; 977 978 // The extra bits are guaranteed to be zero, since we stored them that 979 // way. A zext load from NVT thus automatically gives zext from SrcVT. 980 981 ISD::LoadExtType NewExtType = 982 ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD; 983 984 SDValue Result = 985 DAG.getExtLoad(NewExtType, dl, Node->getValueType(0), 986 Tmp1, Tmp2, LD->getPointerInfo(), 987 NVT, isVolatile, isNonTemporal, Alignment); 988 989 Ch = Result.getValue(1); // The chain. 990 991 if (ExtType == ISD::SEXTLOAD) 992 // Having the top bits zero doesn't help when sign extending. 993 Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, 994 Result.getValueType(), 995 Result, DAG.getValueType(SrcVT)); 996 else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType()) 997 // All the top bits are guaranteed to be zero - inform the optimizers. 998 Result = DAG.getNode(ISD::AssertZext, dl, 999 Result.getValueType(), Result, 1000 DAG.getValueType(SrcVT)); 1001 1002 Tmp1 = Result; 1003 Tmp2 = Ch; 1004 } else if (SrcWidth & (SrcWidth - 1)) { 1005 // If not loading a power-of-2 number of bits, expand as two loads. 1006 assert(!SrcVT.isVector() && "Unsupported extload!"); 1007 unsigned RoundWidth = 1 << Log2_32(SrcWidth); 1008 assert(RoundWidth < SrcWidth); 1009 unsigned ExtraWidth = SrcWidth - RoundWidth; 1010 assert(ExtraWidth < RoundWidth); 1011 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && 1012 "Load size not an integral number of bytes!"); 1013 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth); 1014 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth); 1015 SDValue Lo, Hi, Ch; 1016 unsigned IncrementSize; 1017 1018 if (TLI.isLittleEndian()) { 1019 // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16) 1020 // Load the bottom RoundWidth bits. 1021 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0), 1022 Tmp1, Tmp2, 1023 LD->getPointerInfo(), RoundVT, isVolatile, 1024 isNonTemporal, Alignment); 1025 1026 // Load the remaining ExtraWidth bits. 1027 IncrementSize = RoundWidth / 8; 1028 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1029 DAG.getIntPtrConstant(IncrementSize)); 1030 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2, 1031 LD->getPointerInfo().getWithOffset(IncrementSize), 1032 ExtraVT, isVolatile, isNonTemporal, 1033 MinAlign(Alignment, IncrementSize)); 1034 1035 // Build a factor node to remember that this load is independent of 1036 // the other one. 1037 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 1038 Hi.getValue(1)); 1039 1040 // Move the top bits to the right place. 1041 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, 1042 DAG.getConstant(RoundWidth, 1043 TLI.getShiftAmountTy(Hi.getValueType()))); 1044 1045 // Join the hi and lo parts. 1046 Tmp1 = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); 1047 } else { 1048 // Big endian - avoid unaligned loads. 
1049 // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8 1050 // Load the top RoundWidth bits. 1051 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2, 1052 LD->getPointerInfo(), RoundVT, isVolatile, 1053 isNonTemporal, Alignment); 1054 1055 // Load the remaining ExtraWidth bits. 1056 IncrementSize = RoundWidth / 8; 1057 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1058 DAG.getIntPtrConstant(IncrementSize)); 1059 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, 1060 dl, Node->getValueType(0), Tmp1, Tmp2, 1061 LD->getPointerInfo().getWithOffset(IncrementSize), 1062 ExtraVT, isVolatile, isNonTemporal, 1063 MinAlign(Alignment, IncrementSize)); 1064 1065 // Build a factor node to remember that this load is independent of 1066 // the other one. 1067 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 1068 Hi.getValue(1)); 1069 1070 // Move the top bits to the right place. 1071 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, 1072 DAG.getConstant(ExtraWidth, 1073 TLI.getShiftAmountTy(Hi.getValueType()))); 1074 1075 // Join the hi and lo parts. 1076 Tmp1 = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); 1077 } 1078 1079 Tmp2 = Ch; 1080 } else { 1081 switch (TLI.getLoadExtAction(ExtType, SrcVT)) { 1082 default: assert(0 && "This action is not supported yet!"); 1083 case TargetLowering::Custom: 1084 isCustom = true; 1085 // FALLTHROUGH 1086 case TargetLowering::Legal: 1087 Tmp1 = SDValue(Node, 0); 1088 Tmp2 = SDValue(Node, 1); 1089 1090 if (isCustom) { 1091 Tmp3 = TLI.LowerOperation(SDValue(Node, 0), DAG); 1092 if (Tmp3.getNode()) { 1093 Tmp1 = Tmp3; 1094 Tmp2 = Tmp3.getValue(1); 1095 } 1096 } else { 1097 // If this is an unaligned load and the target doesn't support it, 1098 // expand it. 1099 if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { 1100 Type *Ty = 1101 LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1102 unsigned ABIAlignment = 1103 TLI.getTargetData()->getABITypeAlignment(Ty); 1104 if (LD->getAlignment() < ABIAlignment){ 1105 ExpandUnalignedLoad(cast<LoadSDNode>(Node), 1106 DAG, TLI, Tmp1, Tmp2); 1107 } 1108 } 1109 } 1110 break; 1111 case TargetLowering::Expand: 1112 if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && TLI.isTypeLegal(SrcVT)) { 1113 SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2, 1114 LD->getPointerInfo(), 1115 LD->isVolatile(), LD->isNonTemporal(), 1116 LD->isInvariant(), LD->getAlignment()); 1117 unsigned ExtendOp; 1118 switch (ExtType) { 1119 case ISD::EXTLOAD: 1120 ExtendOp = (SrcVT.isFloatingPoint() ? 1121 ISD::FP_EXTEND : ISD::ANY_EXTEND); 1122 break; 1123 case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break; 1124 case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break; 1125 default: llvm_unreachable("Unexpected extend load type!"); 1126 } 1127 Tmp1 = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load); 1128 Tmp2 = Load.getValue(1); 1129 break; 1130 } 1131 1132 assert(!SrcVT.isVector() && 1133 "Vector Loads are handled in LegalizeVectorOps"); 1134 1135 // FIXME: This does not work for vectors on most targets. Sign- and 1136 // zero-extend operations are currently folded into extending loads, 1137 // whether they are legal or not, and then we end up here without any 1138 // support for legalizing them. 1139 assert(ExtType != ISD::EXTLOAD && 1140 "EXTLOAD should always be supported!"); 1141 // Turn the unsupported load into an EXTLOAD followed by an explicit 1142 // zero/sign extend inreg. 
1143 SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0), 1144 Tmp1, Tmp2, LD->getPointerInfo(), SrcVT, 1145 LD->isVolatile(), LD->isNonTemporal(), 1146 LD->getAlignment()); 1147 SDValue ValRes; 1148 if (ExtType == ISD::SEXTLOAD) 1149 ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, 1150 Result.getValueType(), 1151 Result, DAG.getValueType(SrcVT)); 1152 else 1153 ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType()); 1154 Tmp1 = ValRes; 1155 Tmp2 = Result.getValue(1); 1156 break; 1157 } 1158 } 1159 1160 // Since loads produce two values, make sure to remember that we legalized 1161 // both of them. 1162 if (Tmp2.getNode() != Node) { 1163 assert(Tmp1.getNode() != Node && "Load must be completely replaced"); 1164 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp1); 1165 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Tmp2); 1166 ReplacedNode(Node); 1167 } 1168 break; 1169 } 1170 case ISD::STORE: { 1171 StoreSDNode *ST = cast<StoreSDNode>(Node); 1172 Tmp1 = ST->getChain(); 1173 Tmp2 = ST->getBasePtr(); 1174 unsigned Alignment = ST->getAlignment(); 1175 bool isVolatile = ST->isVolatile(); 1176 bool isNonTemporal = ST->isNonTemporal(); 1177 1178 if (!ST->isTruncatingStore()) { 1179 if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) { 1180 ReplaceNode(ST, OptStore); 1181 break; 1182 } 1183 1184 { 1185 Tmp3 = ST->getValue(); 1186 EVT VT = Tmp3.getValueType(); 1187 switch (TLI.getOperationAction(ISD::STORE, VT)) { 1188 default: assert(0 && "This action is not supported yet!"); 1189 case TargetLowering::Legal: 1190 // If this is an unaligned store and the target doesn't support it, 1191 // expand it. 1192 if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { 1193 Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1194 unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty); 1195 if (ST->getAlignment() < ABIAlignment) 1196 ExpandUnalignedStore(cast<StoreSDNode>(Node), 1197 DAG, TLI, this); 1198 } 1199 break; 1200 case TargetLowering::Custom: 1201 Tmp1 = TLI.LowerOperation(SDValue(Node, 0), DAG); 1202 if (Tmp1.getNode()) 1203 ReplaceNode(SDValue(Node, 0), Tmp1); 1204 break; 1205 case TargetLowering::Promote: { 1206 assert(VT.isVector() && "Unknown legal promote case!"); 1207 Tmp3 = DAG.getNode(ISD::BITCAST, dl, 1208 TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3); 1209 SDValue Result = 1210 DAG.getStore(Tmp1, dl, Tmp3, Tmp2, 1211 ST->getPointerInfo(), isVolatile, 1212 isNonTemporal, Alignment); 1213 ReplaceNode(SDValue(Node, 0), Result); 1214 break; 1215 } 1216 } 1217 break; 1218 } 1219 } else { 1220 Tmp3 = ST->getValue(); 1221 1222 EVT StVT = ST->getMemoryVT(); 1223 unsigned StWidth = StVT.getSizeInBits(); 1224 1225 if (StWidth != StVT.getStoreSizeInBits()) { 1226 // Promote to a byte-sized store with upper bits zero if not 1227 // storing an integral number of bytes. For example, promote 1228 // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1) 1229 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), 1230 StVT.getStoreSizeInBits()); 1231 Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT); 1232 SDValue Result = 1233 DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 1234 NVT, isVolatile, isNonTemporal, Alignment); 1235 ReplaceNode(SDValue(Node, 0), Result); 1236 } else if (StWidth & (StWidth - 1)) { 1237 // If not storing a power-of-2 number of bits, expand as two stores. 
1238 assert(!StVT.isVector() && "Unsupported truncstore!"); 1239 unsigned RoundWidth = 1 << Log2_32(StWidth); 1240 assert(RoundWidth < StWidth); 1241 unsigned ExtraWidth = StWidth - RoundWidth; 1242 assert(ExtraWidth < RoundWidth); 1243 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && 1244 "Store size not an integral number of bytes!"); 1245 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth); 1246 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth); 1247 SDValue Lo, Hi; 1248 unsigned IncrementSize; 1249 1250 if (TLI.isLittleEndian()) { 1251 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16) 1252 // Store the bottom RoundWidth bits. 1253 Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 1254 RoundVT, 1255 isVolatile, isNonTemporal, Alignment); 1256 1257 // Store the remaining ExtraWidth bits. 1258 IncrementSize = RoundWidth / 8; 1259 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1260 DAG.getIntPtrConstant(IncrementSize)); 1261 Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3, 1262 DAG.getConstant(RoundWidth, 1263 TLI.getShiftAmountTy(Tmp3.getValueType()))); 1264 Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, 1265 ST->getPointerInfo().getWithOffset(IncrementSize), 1266 ExtraVT, isVolatile, isNonTemporal, 1267 MinAlign(Alignment, IncrementSize)); 1268 } else { 1269 // Big endian - avoid unaligned stores. 1270 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X 1271 // Store the top RoundWidth bits. 1272 Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3, 1273 DAG.getConstant(ExtraWidth, 1274 TLI.getShiftAmountTy(Tmp3.getValueType()))); 1275 Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getPointerInfo(), 1276 RoundVT, isVolatile, isNonTemporal, Alignment); 1277 1278 // Store the remaining ExtraWidth bits. 1279 IncrementSize = RoundWidth / 8; 1280 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1281 DAG.getIntPtrConstant(IncrementSize)); 1282 Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, 1283 ST->getPointerInfo().getWithOffset(IncrementSize), 1284 ExtraVT, isVolatile, isNonTemporal, 1285 MinAlign(Alignment, IncrementSize)); 1286 } 1287 1288 // The order of the stores doesn't matter. 1289 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi); 1290 ReplaceNode(SDValue(Node, 0), Result); 1291 } else { 1292 switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) { 1293 default: assert(0 && "This action is not supported yet!"); 1294 case TargetLowering::Legal: 1295 // If this is an unaligned store and the target doesn't support it, 1296 // expand it. 
1297 if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { 1298 Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1299 unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty); 1300 if (ST->getAlignment() < ABIAlignment) 1301 ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this); 1302 } 1303 break; 1304 case TargetLowering::Custom: 1305 ReplaceNode(SDValue(Node, 0), 1306 TLI.LowerOperation(SDValue(Node, 0), DAG)); 1307 break; 1308 case TargetLowering::Expand: 1309 assert(!StVT.isVector() && 1310 "Vector Stores are handled in LegalizeVectorOps"); 1311 1312 // TRUNCSTORE:i16 i32 -> STORE i16 1313 assert(TLI.isTypeLegal(StVT) && "Do not know how to expand this store!"); 1314 Tmp3 = DAG.getNode(ISD::TRUNCATE, dl, StVT, Tmp3); 1315 SDValue Result = 1316 DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 1317 isVolatile, isNonTemporal, Alignment); 1318 ReplaceNode(SDValue(Node, 0), Result); 1319 break; 1320 } 1321 } 1322 } 1323 break; 1324 } 1325 } 1326} 1327 1328SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) { 1329 SDValue Vec = Op.getOperand(0); 1330 SDValue Idx = Op.getOperand(1); 1331 DebugLoc dl = Op.getDebugLoc(); 1332 // Store the value to a temporary stack slot, then LOAD the returned part. 1333 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); 1334 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, 1335 MachinePointerInfo(), false, false, 0); 1336 1337 // Add the offset to the index. 1338 unsigned EltSize = 1339 Vec.getValueType().getVectorElementType().getSizeInBits()/8; 1340 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, 1341 DAG.getConstant(EltSize, Idx.getValueType())); 1342 1343 if (Idx.getValueType().bitsGT(TLI.getPointerTy())) 1344 Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx); 1345 else 1346 Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx); 1347 1348 StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr); 1349 1350 if (Op.getValueType().isVector()) 1351 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr,MachinePointerInfo(), 1352 false, false, false, 0); 1353 return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr, 1354 MachinePointerInfo(), 1355 Vec.getValueType().getVectorElementType(), 1356 false, false, 0); 1357} 1358 1359SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) { 1360 assert(Op.getValueType().isVector() && "Non-vector insert subvector!"); 1361 1362 SDValue Vec = Op.getOperand(0); 1363 SDValue Part = Op.getOperand(1); 1364 SDValue Idx = Op.getOperand(2); 1365 DebugLoc dl = Op.getDebugLoc(); 1366 1367 // Store the value to a temporary stack slot, then LOAD the returned part. 1368 1369 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); 1370 int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 1371 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); 1372 1373 // First store the whole vector. 1374 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo, 1375 false, false, 0); 1376 1377 // Then store the inserted part. 1378 1379 // Add the offset to the index. 
1380 unsigned EltSize = 1381 Vec.getValueType().getVectorElementType().getSizeInBits()/8; 1382 1383 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, 1384 DAG.getConstant(EltSize, Idx.getValueType())); 1385 1386 if (Idx.getValueType().bitsGT(TLI.getPointerTy())) 1387 Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx); 1388 else 1389 Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx); 1390 1391 SDValue SubStackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, 1392 StackPtr); 1393 1394 // Store the subvector. 1395 Ch = DAG.getStore(DAG.getEntryNode(), dl, Part, SubStackPtr, 1396 MachinePointerInfo(), false, false, 0); 1397 1398 // Finally, load the updated vector. 1399 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo, 1400 false, false, false, 0); 1401} 1402 1403SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) { 1404 // We can't handle this case efficiently. Allocate a sufficiently 1405 // aligned object on the stack, store each element into it, then load 1406 // the result as a vector. 1407 // Create the stack frame object. 1408 EVT VT = Node->getValueType(0); 1409 EVT EltVT = VT.getVectorElementType(); 1410 DebugLoc dl = Node->getDebugLoc(); 1411 SDValue FIPtr = DAG.CreateStackTemporary(VT); 1412 int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex(); 1413 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); 1414 1415 // Emit a store of each element to the stack slot. 1416 SmallVector<SDValue, 8> Stores; 1417 unsigned TypeByteSize = EltVT.getSizeInBits() / 8; 1418 // Store (in the right endianness) the elements to memory. 1419 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1420 // Ignore undef elements. 1421 if (Node->getOperand(i).getOpcode() == ISD::UNDEF) continue; 1422 1423 unsigned Offset = TypeByteSize*i; 1424 1425 SDValue Idx = DAG.getConstant(Offset, FIPtr.getValueType()); 1426 Idx = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, Idx); 1427 1428 // If the destination vector element type is narrower than the source 1429 // element type, only store the bits necessary. 1430 if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) { 1431 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, 1432 Node->getOperand(i), Idx, 1433 PtrInfo.getWithOffset(Offset), 1434 EltVT, false, false, 0)); 1435 } else 1436 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, 1437 Node->getOperand(i), Idx, 1438 PtrInfo.getWithOffset(Offset), 1439 false, false, 0)); 1440 } 1441 1442 SDValue StoreChain; 1443 if (!Stores.empty()) // Not all undef elements? 1444 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1445 &Stores[0], Stores.size()); 1446 else 1447 StoreChain = DAG.getEntryNode(); 1448 1449 // Result is a load from the stack slot. 1450 return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo, 1451 false, false, false, 0); 1452} 1453 1454SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) { 1455 DebugLoc dl = Node->getDebugLoc(); 1456 SDValue Tmp1 = Node->getOperand(0); 1457 SDValue Tmp2 = Node->getOperand(1); 1458 1459 // Get the sign bit of the RHS. First obtain a value that has the same 1460 // sign as the sign bit, i.e. negative if and only if the sign bit is 1. 1461 SDValue SignBit; 1462 EVT FloatVT = Tmp2.getValueType(); 1463 EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits()); 1464 if (TLI.isTypeLegal(IVT)) { 1465 // Convert to an integer with the same sign bit. 
1466 SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2); 1467 } else { 1468 // Store the float to memory, then load the sign part out as an integer. 1469 MVT LoadTy = TLI.getPointerTy(); 1470 // First create a temporary that is aligned for both the load and store. 1471 SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy); 1472 // Then store the float to it. 1473 SDValue Ch = 1474 DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, MachinePointerInfo(), 1475 false, false, 0); 1476 if (TLI.isBigEndian()) { 1477 assert(FloatVT.isByteSized() && "Unsupported floating point type!"); 1478 // Load out a legal integer with the same sign bit as the float. 1479 SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(), 1480 false, false, false, 0); 1481 } else { // Little endian 1482 SDValue LoadPtr = StackPtr; 1483 // The float may be wider than the integer we are going to load. Advance 1484 // the pointer so that the loaded integer will contain the sign bit. 1485 unsigned Strides = (FloatVT.getSizeInBits()-1)/LoadTy.getSizeInBits(); 1486 unsigned ByteOffset = (Strides * LoadTy.getSizeInBits()) / 8; 1487 LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(), 1488 LoadPtr, DAG.getIntPtrConstant(ByteOffset)); 1489 // Load a legal integer containing the sign bit. 1490 SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, MachinePointerInfo(), 1491 false, false, false, 0); 1492 // Move the sign bit to the top bit of the loaded integer. 1493 unsigned BitShift = LoadTy.getSizeInBits() - 1494 (FloatVT.getSizeInBits() - 8 * ByteOffset); 1495 assert(BitShift < LoadTy.getSizeInBits() && "Pointer advanced wrong?"); 1496 if (BitShift) 1497 SignBit = DAG.getNode(ISD::SHL, dl, LoadTy, SignBit, 1498 DAG.getConstant(BitShift, 1499 TLI.getShiftAmountTy(SignBit.getValueType()))); 1500 } 1501 } 1502 // Now get the sign bit proper, by seeing whether the value is negative. 1503 SignBit = DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()), 1504 SignBit, DAG.getConstant(0, SignBit.getValueType()), 1505 ISD::SETLT); 1506 // Get the absolute value of the result. 1507 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1); 1508 // Select between the nabs and abs value based on the sign bit of 1509 // the input. 1510 return DAG.getNode(ISD::SELECT, dl, AbsVal.getValueType(), SignBit, 1511 DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal), 1512 AbsVal); 1513} 1514 1515void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node, 1516 SmallVectorImpl<SDValue> &Results) { 1517 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore(); 1518 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and" 1519 " not tell us which reg is the stack pointer!"); 1520 DebugLoc dl = Node->getDebugLoc(); 1521 EVT VT = Node->getValueType(0); 1522 SDValue Tmp1 = SDValue(Node, 0); 1523 SDValue Tmp2 = SDValue(Node, 1); 1524 SDValue Tmp3 = Node->getOperand(2); 1525 SDValue Chain = Tmp1.getOperand(0); 1526 1527 // Chain the dynamic stack allocation so that it doesn't modify the stack 1528 // pointer when other instructions are using the stack. 
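  // The CALLSEQ_START / CALLSEQ_END pair below brackets the allocation just
  // like an ordinary call sequence, which keeps other stack-pointer users
  // from being scheduled into the middle of it. The allocation itself reads
  // SP, aligns it down when the requested alignment exceeds the target's
  // stack alignment (SP &= -Align), subtracts the size, and writes SP back.
  // For example, with Align = 32 the mask -32 clears the low five bits of
  // the old stack pointer before the subtraction.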
1529 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true)); 1530 1531 SDValue Size = Tmp2.getOperand(1); 1532 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); 1533 Chain = SP.getValue(1); 1534 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue(); 1535 unsigned StackAlign = TM.getFrameLowering()->getStackAlignment(); 1536 if (Align > StackAlign) 1537 SP = DAG.getNode(ISD::AND, dl, VT, SP, 1538 DAG.getConstant(-(uint64_t)Align, VT)); 1539 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value 1540 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain 1541 1542 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true), 1543 DAG.getIntPtrConstant(0, true), SDValue()); 1544 1545 Results.push_back(Tmp1); 1546 Results.push_back(Tmp2); 1547} 1548 1549/// LegalizeSetCCCondCode - Legalize a SETCC with given LHS and RHS and 1550/// condition code CC on the current target. This routine expands SETCC with 1551/// illegal condition code into AND / OR of multiple SETCC values. 1552void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, 1553 SDValue &LHS, SDValue &RHS, 1554 SDValue &CC, 1555 DebugLoc dl) { 1556 EVT OpVT = LHS.getValueType(); 1557 ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get(); 1558 switch (TLI.getCondCodeAction(CCCode, OpVT)) { 1559 default: assert(0 && "Unknown condition code action!"); 1560 case TargetLowering::Legal: 1561 // Nothing to do. 1562 break; 1563 case TargetLowering::Expand: { 1564 ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID; 1565 unsigned Opc = 0; 1566 switch (CCCode) { 1567 default: assert(0 && "Don't know how to expand this condition!"); 1568 case ISD::SETOEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETO; Opc = ISD::AND; break; 1569 case ISD::SETOGT: CC1 = ISD::SETGT; CC2 = ISD::SETO; Opc = ISD::AND; break; 1570 case ISD::SETOGE: CC1 = ISD::SETGE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1571 case ISD::SETOLT: CC1 = ISD::SETLT; CC2 = ISD::SETO; Opc = ISD::AND; break; 1572 case ISD::SETOLE: CC1 = ISD::SETLE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1573 case ISD::SETONE: CC1 = ISD::SETNE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1574 case ISD::SETUEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1575 case ISD::SETUGT: CC1 = ISD::SETGT; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1576 case ISD::SETUGE: CC1 = ISD::SETGE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1577 case ISD::SETULT: CC1 = ISD::SETLT; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1578 case ISD::SETULE: CC1 = ISD::SETLE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1579 case ISD::SETUNE: CC1 = ISD::SETNE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1580 // FIXME: Implement more expansions. 1581 } 1582 1583 SDValue SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1); 1584 SDValue SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2); 1585 LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2); 1586 RHS = SDValue(); 1587 CC = SDValue(); 1588 break; 1589 } 1590 } 1591} 1592 1593/// EmitStackConvert - Emit a store/load combination to the stack. This stores 1594/// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does 1595/// a load from the stack slot to DestVT, extending it if needed. 1596/// The resultant code need not be legal. 1597SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, 1598 EVT SlotVT, 1599 EVT DestVT, 1600 DebugLoc dl) { 1601 // Create the stack frame object. 1602 unsigned SrcAlign = 1603 TLI.getTargetData()->getPrefTypeAlignment(SrcOp.getValueType(). 
1604 getTypeForEVT(*DAG.getContext())); 1605 SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign); 1606 1607 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr); 1608 int SPFI = StackPtrFI->getIndex(); 1609 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI); 1610 1611 unsigned SrcSize = SrcOp.getValueType().getSizeInBits(); 1612 unsigned SlotSize = SlotVT.getSizeInBits(); 1613 unsigned DestSize = DestVT.getSizeInBits(); 1614 Type *DestType = DestVT.getTypeForEVT(*DAG.getContext()); 1615 unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(DestType); 1616 1617 // Emit a store to the stack slot. Use a truncstore if the input value is 1618 // later than DestVT. 1619 SDValue Store; 1620 1621 if (SrcSize > SlotSize) 1622 Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, 1623 PtrInfo, SlotVT, false, false, SrcAlign); 1624 else { 1625 assert(SrcSize == SlotSize && "Invalid store"); 1626 Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, 1627 PtrInfo, false, false, SrcAlign); 1628 } 1629 1630 // Result is a load from the stack slot. 1631 if (SlotSize == DestSize) 1632 return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo, 1633 false, false, false, DestAlign); 1634 1635 assert(SlotSize < DestSize && "Unknown extension!"); 1636 return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, 1637 PtrInfo, SlotVT, false, false, DestAlign); 1638} 1639 1640SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) { 1641 DebugLoc dl = Node->getDebugLoc(); 1642 // Create a vector sized/aligned stack slot, store the value to element #0, 1643 // then load the whole vector back out. 1644 SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0)); 1645 1646 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr); 1647 int SPFI = StackPtrFI->getIndex(); 1648 1649 SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0), 1650 StackPtr, 1651 MachinePointerInfo::getFixedStack(SPFI), 1652 Node->getValueType(0).getVectorElementType(), 1653 false, false, 0); 1654 return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr, 1655 MachinePointerInfo::getFixedStack(SPFI), 1656 false, false, false, 0); 1657} 1658 1659 1660/// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't 1661/// support the operation, but do support the resultant vector type. 1662SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { 1663 unsigned NumElems = Node->getNumOperands(); 1664 SDValue Value1, Value2; 1665 DebugLoc dl = Node->getDebugLoc(); 1666 EVT VT = Node->getValueType(0); 1667 EVT OpVT = Node->getOperand(0).getValueType(); 1668 EVT EltVT = VT.getVectorElementType(); 1669 1670 // If the only non-undef value is the low element, turn this into a 1671 // SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X. 
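  // The scan below classifies the operands so the cases that follow can use
  // the cheapest expansion: an all-undef vector becomes UNDEF, a single
  // defined element in lane 0 becomes SCALAR_TO_VECTOR, an all-constant
  // vector becomes a constant-pool load, at most two distinct values become
  // a VECTOR_SHUFFLE of two SCALAR_TO_VECTOR nodes (when the mask is
  // legal), and anything else is built through a stack slot.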
1672 bool isOnlyLowElement = true; 1673 bool MoreThanTwoValues = false; 1674 bool isConstant = true; 1675 for (unsigned i = 0; i < NumElems; ++i) { 1676 SDValue V = Node->getOperand(i); 1677 if (V.getOpcode() == ISD::UNDEF) 1678 continue; 1679 if (i > 0) 1680 isOnlyLowElement = false; 1681 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 1682 isConstant = false; 1683 1684 if (!Value1.getNode()) { 1685 Value1 = V; 1686 } else if (!Value2.getNode()) { 1687 if (V != Value1) 1688 Value2 = V; 1689 } else if (V != Value1 && V != Value2) { 1690 MoreThanTwoValues = true; 1691 } 1692 } 1693 1694 if (!Value1.getNode()) 1695 return DAG.getUNDEF(VT); 1696 1697 if (isOnlyLowElement) 1698 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0)); 1699 1700 // If all elements are constants, create a load from the constant pool. 1701 if (isConstant) { 1702 std::vector<Constant*> CV; 1703 for (unsigned i = 0, e = NumElems; i != e; ++i) { 1704 if (ConstantFPSDNode *V = 1705 dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) { 1706 CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue())); 1707 } else if (ConstantSDNode *V = 1708 dyn_cast<ConstantSDNode>(Node->getOperand(i))) { 1709 if (OpVT==EltVT) 1710 CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue())); 1711 else { 1712 // If OpVT and EltVT don't match, EltVT is not legal and the 1713 // element values have been promoted/truncated earlier. Undo this; 1714 // we don't want a v16i8 to become a v16i32 for example. 1715 const ConstantInt *CI = V->getConstantIntValue(); 1716 CV.push_back(ConstantInt::get(EltVT.getTypeForEVT(*DAG.getContext()), 1717 CI->getZExtValue())); 1718 } 1719 } else { 1720 assert(Node->getOperand(i).getOpcode() == ISD::UNDEF); 1721 Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext()); 1722 CV.push_back(UndefValue::get(OpNTy)); 1723 } 1724 } 1725 Constant *CP = ConstantVector::get(CV); 1726 SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy()); 1727 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 1728 return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 1729 MachinePointerInfo::getConstantPool(), 1730 false, false, false, Alignment); 1731 } 1732 1733 if (!MoreThanTwoValues) { 1734 SmallVector<int, 8> ShuffleVec(NumElems, -1); 1735 for (unsigned i = 0; i < NumElems; ++i) { 1736 SDValue V = Node->getOperand(i); 1737 if (V.getOpcode() == ISD::UNDEF) 1738 continue; 1739 ShuffleVec[i] = V == Value1 ? 0 : NumElems; 1740 } 1741 if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) { 1742 // Get the splatted value into the low element of a vector register. 1743 SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1); 1744 SDValue Vec2; 1745 if (Value2.getNode()) 1746 Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2); 1747 else 1748 Vec2 = DAG.getUNDEF(VT); 1749 1750 // Return shuffle(LowValVec, undef, <0,0,0,0>) 1751 return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data()); 1752 } 1753 } 1754 1755 // Otherwise, we can't handle this case efficiently. 1756 return ExpandVectorBuildThroughStack(Node); 1757} 1758 1759// ExpandLibCall - Expand a node into a call to a libcall. If the result value 1760// does not fit into a register, return the lo part and set the hi part to the 1761// by-reg argument. If it does fit into a single register, return the result 1762// and leave the Hi part unset. 
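// Note that when the call is actually emitted as a tail call, LowerCallTo
// returns a null output chain, and the routine below returns the DAG root
// (the chain) instead of a normal result value.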
1763SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, 1764 bool isSigned) { 1765 // The input chain to this libcall is the entry node of the function. 1766 // Legalizing the call will automatically add the previous call to the 1767 // dependence. 1768 SDValue InChain = DAG.getEntryNode(); 1769 1770 TargetLowering::ArgListTy Args; 1771 TargetLowering::ArgListEntry Entry; 1772 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1773 EVT ArgVT = Node->getOperand(i).getValueType(); 1774 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 1775 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 1776 Entry.isSExt = isSigned; 1777 Entry.isZExt = !isSigned; 1778 Args.push_back(Entry); 1779 } 1780 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1781 TLI.getPointerTy()); 1782 1783 Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 1784 1785 // isTailCall may be true since the callee does not reference caller stack 1786 // frame. Check if it's in the right position. 1787 bool isTailCall = isInTailCallPosition(DAG, Node, TLI); 1788 std::pair<SDValue, SDValue> CallInfo = 1789 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 1790 0, TLI.getLibcallCallingConv(LC), isTailCall, 1791 /*isReturnValueUsed=*/true, 1792 Callee, Args, DAG, Node->getDebugLoc()); 1793 1794 if (!CallInfo.second.getNode()) 1795 // It's a tailcall, return the chain (which is the DAG root). 1796 return DAG.getRoot(); 1797 1798 return CallInfo.first; 1799} 1800 1801/// ExpandLibCall - Generate a libcall taking the given operands as arguments 1802/// and returning a result of type RetVT. 1803SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, 1804 const SDValue *Ops, unsigned NumOps, 1805 bool isSigned, DebugLoc dl) { 1806 TargetLowering::ArgListTy Args; 1807 Args.reserve(NumOps); 1808 1809 TargetLowering::ArgListEntry Entry; 1810 for (unsigned i = 0; i != NumOps; ++i) { 1811 Entry.Node = Ops[i]; 1812 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext()); 1813 Entry.isSExt = isSigned; 1814 Entry.isZExt = !isSigned; 1815 Args.push_back(Entry); 1816 } 1817 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1818 TLI.getPointerTy()); 1819 1820 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 1821 std::pair<SDValue,SDValue> CallInfo = 1822 TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false, 1823 false, 0, TLI.getLibcallCallingConv(LC), false, 1824 /*isReturnValueUsed=*/true, 1825 Callee, Args, DAG, dl); 1826 1827 return CallInfo.first; 1828} 1829 1830// ExpandChainLibCall - Expand a node into a call to a libcall. Similar to 1831// ExpandLibCall except that the first operand is the in-chain. 
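// Unlike the plain ExpandLibCall above, operand 0 of the node is consumed
// as the incoming chain and both the call's result value and its output
// chain are returned; the atomic expansions further down rely on this to
// lower ATOMIC_* nodes to the __sync_* libcalls.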
1832std::pair<SDValue, SDValue> 1833SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC, 1834 SDNode *Node, 1835 bool isSigned) { 1836 SDValue InChain = Node->getOperand(0); 1837 1838 TargetLowering::ArgListTy Args; 1839 TargetLowering::ArgListEntry Entry; 1840 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) { 1841 EVT ArgVT = Node->getOperand(i).getValueType(); 1842 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 1843 Entry.Node = Node->getOperand(i); 1844 Entry.Ty = ArgTy; 1845 Entry.isSExt = isSigned; 1846 Entry.isZExt = !isSigned; 1847 Args.push_back(Entry); 1848 } 1849 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1850 TLI.getPointerTy()); 1851 1852 Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 1853 std::pair<SDValue, SDValue> CallInfo = 1854 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 1855 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false, 1856 /*isReturnValueUsed=*/true, 1857 Callee, Args, DAG, Node->getDebugLoc()); 1858 1859 return CallInfo; 1860} 1861 1862SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node, 1863 RTLIB::Libcall Call_F32, 1864 RTLIB::Libcall Call_F64, 1865 RTLIB::Libcall Call_F80, 1866 RTLIB::Libcall Call_PPCF128) { 1867 RTLIB::Libcall LC; 1868 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1869 default: assert(0 && "Unexpected request for libcall!"); 1870 case MVT::f32: LC = Call_F32; break; 1871 case MVT::f64: LC = Call_F64; break; 1872 case MVT::f80: LC = Call_F80; break; 1873 case MVT::ppcf128: LC = Call_PPCF128; break; 1874 } 1875 return ExpandLibCall(LC, Node, false); 1876} 1877 1878SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned, 1879 RTLIB::Libcall Call_I8, 1880 RTLIB::Libcall Call_I16, 1881 RTLIB::Libcall Call_I32, 1882 RTLIB::Libcall Call_I64, 1883 RTLIB::Libcall Call_I128) { 1884 RTLIB::Libcall LC; 1885 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1886 default: assert(0 && "Unexpected request for libcall!"); 1887 case MVT::i8: LC = Call_I8; break; 1888 case MVT::i16: LC = Call_I16; break; 1889 case MVT::i32: LC = Call_I32; break; 1890 case MVT::i64: LC = Call_I64; break; 1891 case MVT::i128: LC = Call_I128; break; 1892 } 1893 return ExpandLibCall(LC, Node, isSigned); 1894} 1895 1896/// isDivRemLibcallAvailable - Return true if divmod libcall is available. 1897static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned, 1898 const TargetLowering &TLI) { 1899 RTLIB::Libcall LC; 1900 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1901 default: assert(0 && "Unexpected request for libcall!"); 1902 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 1903 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 1904 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 1905 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 1906 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 1907 } 1908 1909 return TLI.getLibcallName(LC) != 0; 1910} 1911 1912/// UseDivRem - Only issue divrem libcall if both quotient and remainder are 1913/// needed. 1914static bool UseDivRem(SDNode *Node, bool isSigned, bool isDIV) { 1915 unsigned OtherOpcode = 0; 1916 if (isSigned) 1917 OtherOpcode = isDIV ? ISD::SREM : ISD::SDIV; 1918 else 1919 OtherOpcode = isDIV ? 
ISD::UREM : ISD::UDIV; 1920 1921 SDValue Op0 = Node->getOperand(0); 1922 SDValue Op1 = Node->getOperand(1); 1923 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(), 1924 UE = Op0.getNode()->use_end(); UI != UE; ++UI) { 1925 SDNode *User = *UI; 1926 if (User == Node) 1927 continue; 1928 if (User->getOpcode() == OtherOpcode && 1929 User->getOperand(0) == Op0 && 1930 User->getOperand(1) == Op1) 1931 return true; 1932 } 1933 return false; 1934} 1935 1936/// ExpandDivRemLibCall - Issue libcalls to __{u}divmod to compute div / rem 1937/// pairs. 1938void 1939SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node, 1940 SmallVectorImpl<SDValue> &Results) { 1941 unsigned Opcode = Node->getOpcode(); 1942 bool isSigned = Opcode == ISD::SDIVREM; 1943 1944 RTLIB::Libcall LC; 1945 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1946 default: assert(0 && "Unexpected request for libcall!"); 1947 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 1948 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 1949 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 1950 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 1951 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 1952 } 1953 1954 // The input chain to this libcall is the entry node of the function. 1955 // Legalizing the call will automatically add the previous call to the 1956 // dependence. 1957 SDValue InChain = DAG.getEntryNode(); 1958 1959 EVT RetVT = Node->getValueType(0); 1960 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 1961 1962 TargetLowering::ArgListTy Args; 1963 TargetLowering::ArgListEntry Entry; 1964 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1965 EVT ArgVT = Node->getOperand(i).getValueType(); 1966 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 1967 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 1968 Entry.isSExt = isSigned; 1969 Entry.isZExt = !isSigned; 1970 Args.push_back(Entry); 1971 } 1972 1973 // Also pass the return address of the remainder. 1974 SDValue FIPtr = DAG.CreateStackTemporary(RetVT); 1975 Entry.Node = FIPtr; 1976 Entry.Ty = RetTy->getPointerTo(); 1977 Entry.isSExt = isSigned; 1978 Entry.isZExt = !isSigned; 1979 Args.push_back(Entry); 1980 1981 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1982 TLI.getPointerTy()); 1983 1984 DebugLoc dl = Node->getDebugLoc(); 1985 std::pair<SDValue, SDValue> CallInfo = 1986 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 1987 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false, 1988 /*isReturnValueUsed=*/true, Callee, Args, DAG, dl); 1989 1990 // Remainder is loaded back from the stack frame. 1991 SDValue Rem = DAG.getLoad(RetVT, dl, CallInfo.second, FIPtr, 1992 MachinePointerInfo(), false, false, false, 0); 1993 Results.push_back(CallInfo.first); 1994 Results.push_back(Rem); 1995} 1996 1997/// ExpandLegalINT_TO_FP - This function is responsible for legalizing a 1998/// INT_TO_FP operation of the specified operand when the target requests that 1999/// we expand it. At this point, we know that the result and operand types are 2000/// legal for the target. 2001SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, 2002 SDValue Op0, 2003 EVT DestVT, 2004 DebugLoc dl) { 2005 if (Op0.getValueType() == MVT::i32) { 2006 // simple 32-bit [signed|unsigned] integer to float/double expansion 2007 2008 // Get the stack frame index of a 8 byte buffer. 
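    // The idea: build an f64 in memory whose high word is the exponent
    // pattern 0x43300000 (the double 2^52) and whose low word is the input,
    // xor'ed with 0x80000000 in the signed case, then subtract the matching
    // bias. Rough scalar equivalent for the signed case (illustration only,
    // not code from this file):
    //   uint64_t Bits = (uint64_t)0x43300000u << 32 | (uint32_t)(x ^ 0x80000000u);
    //   double D;  memcpy(&D, &Bits, sizeof(D));
    //   double Res = D - BitsToDouble(0x4330000080000000ULL);  // 2^52 + 2^31
    // This works because the constructed double is exactly 2^52 + (x + 2^31).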
2009 SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64); 2010 2011 // word offset constant for Hi/Lo address computation 2012 SDValue WordOff = DAG.getConstant(sizeof(int), TLI.getPointerTy()); 2013 // set up Hi and Lo (into buffer) address based on endian 2014 SDValue Hi = StackSlot; 2015 SDValue Lo = DAG.getNode(ISD::ADD, dl, 2016 TLI.getPointerTy(), StackSlot, WordOff); 2017 if (TLI.isLittleEndian()) 2018 std::swap(Hi, Lo); 2019 2020 // if signed map to unsigned space 2021 SDValue Op0Mapped; 2022 if (isSigned) { 2023 // constant used to invert sign bit (signed to unsigned mapping) 2024 SDValue SignBit = DAG.getConstant(0x80000000u, MVT::i32); 2025 Op0Mapped = DAG.getNode(ISD::XOR, dl, MVT::i32, Op0, SignBit); 2026 } else { 2027 Op0Mapped = Op0; 2028 } 2029 // store the lo of the constructed double - based on integer input 2030 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, 2031 Op0Mapped, Lo, MachinePointerInfo(), 2032 false, false, 0); 2033 // initial hi portion of constructed double 2034 SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32); 2035 // store the hi of the constructed double - biased exponent 2036 SDValue Store2 = DAG.getStore(Store1, dl, InitialHi, Hi, 2037 MachinePointerInfo(), 2038 false, false, 0); 2039 // load the constructed double 2040 SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot, 2041 MachinePointerInfo(), false, false, false, 0); 2042 // FP constant to bias correct the final result 2043 SDValue Bias = DAG.getConstantFP(isSigned ? 2044 BitsToDouble(0x4330000080000000ULL) : 2045 BitsToDouble(0x4330000000000000ULL), 2046 MVT::f64); 2047 // subtract the bias 2048 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias); 2049 // final result 2050 SDValue Result; 2051 // handle final rounding 2052 if (DestVT == MVT::f64) { 2053 // do nothing 2054 Result = Sub; 2055 } else if (DestVT.bitsLT(MVT::f64)) { 2056 Result = DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 2057 DAG.getIntPtrConstant(0)); 2058 } else if (DestVT.bitsGT(MVT::f64)) { 2059 Result = DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 2060 } 2061 return Result; 2062 } 2063 assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet"); 2064 // Code below here assumes !isSigned without checking again. 2065 2066 // Implementation of unsigned i64 to f64 following the algorithm in 2067 // __floatundidf in compiler_rt. This implementation has the advantage 2068 // of performing rounding correctly, both in the default rounding mode 2069 // and in all alternate rounding modes. 2070 // TODO: Generalize this for use with other types. 
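  // The expansion splits the u64 into 32-bit halves, ORs each half into the
  // mantissa of a power-of-two double (0x433... giving 2^52 + Lo, 0x453...
  // giving 2^84 + Hi * 2^32), cancels both biases with the single constant
  // 2^84 + 2^52, and adds the two pieces. Every step except the final FADD
  // is exact, which is why only one rounding ever happens:
  //   LoFlt  = 2^52 + Lo                                  (exact, Lo < 2^32)
  //   HiFlt  = 2^84 + Hi * 2^32                           (exact, Hi < 2^32)
  //   HiSub  = HiFlt - (2^84 + 2^52) = Hi * 2^32 - 2^52   (exact)
  //   Result = LoFlt + HiSub = Hi * 2^32 + Lo             (one rounding)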
2071 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f64) { 2072 SDValue TwoP52 = 2073 DAG.getConstant(UINT64_C(0x4330000000000000), MVT::i64); 2074 SDValue TwoP84PlusTwoP52 = 2075 DAG.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), MVT::f64); 2076 SDValue TwoP84 = 2077 DAG.getConstant(UINT64_C(0x4530000000000000), MVT::i64); 2078 2079 SDValue Lo = DAG.getZeroExtendInReg(Op0, dl, MVT::i32); 2080 SDValue Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, 2081 DAG.getConstant(32, MVT::i64)); 2082 SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52); 2083 SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84); 2084 SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr); 2085 SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr); 2086 SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt, 2087 TwoP84PlusTwoP52); 2088 return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub); 2089 } 2090 2091 // Implementation of unsigned i64 to f32. 2092 // TODO: Generalize this for use with other types. 2093 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) { 2094 // For unsigned conversions, convert them to signed conversions using the 2095 // algorithm from the x86_64 __floatundidf in compiler_rt. 2096 if (!isSigned) { 2097 SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0); 2098 2099 SDValue ShiftConst = 2100 DAG.getConstant(1, TLI.getShiftAmountTy(Op0.getValueType())); 2101 SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst); 2102 SDValue AndConst = DAG.getConstant(1, MVT::i64); 2103 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst); 2104 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr); 2105 2106 SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or); 2107 SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt); 2108 2109 // TODO: This really should be implemented using a branch rather than a 2110 // select. We happen to get lucky and machinesink does the right 2111 // thing most of the time. This would be a good candidate for a 2112 //pseudo-op, or, even better, for whole-function isel. 2113 SDValue SignBitTest = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2114 Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT); 2115 return DAG.getNode(ISD::SELECT, dl, MVT::f32, SignBitTest, Slow, Fast); 2116 } 2117 2118 // Otherwise, implement the fully general conversion. 
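    // The remaining hazard is double rounding: the value is converted
    // through f64 in two 32-bit pieces and then rounded again down to f32.
    // To keep that final rounding correct, the code below applies Von
    // Neumann rounding ("round to odd") to the low 11 bits whenever the
    // input is at least 2^53: if any of bits 0-10 are set, they are cleared
    // and bit 11 is forced to 1, so the discarded information survives as a
    // sticky bit through the intermediate f64 arithmetic.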
2119 2120 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2121 DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64)); 2122 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, 2123 DAG.getConstant(UINT64_C(0x800), MVT::i64)); 2124 SDValue And2 = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2125 DAG.getConstant(UINT64_C(0x7ff), MVT::i64)); 2126 SDValue Ne = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2127 And2, DAG.getConstant(UINT64_C(0), MVT::i64), ISD::SETNE); 2128 SDValue Sel = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ne, Or, Op0); 2129 SDValue Ge = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2130 Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64), 2131 ISD::SETUGE); 2132 SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0); 2133 EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType()); 2134 2135 SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2, 2136 DAG.getConstant(32, SHVT)); 2137 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh); 2138 SDValue Fcvt = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Trunc); 2139 SDValue TwoP32 = 2140 DAG.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), MVT::f64); 2141 SDValue Fmul = DAG.getNode(ISD::FMUL, dl, MVT::f64, TwoP32, Fcvt); 2142 SDValue Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sel2); 2143 SDValue Fcvt2 = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Lo); 2144 SDValue Fadd = DAG.getNode(ISD::FADD, dl, MVT::f64, Fmul, Fcvt2); 2145 return DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Fadd, 2146 DAG.getIntPtrConstant(0)); 2147 } 2148 2149 SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0); 2150 2151 SDValue SignSet = DAG.getSetCC(dl, TLI.getSetCCResultType(Op0.getValueType()), 2152 Op0, DAG.getConstant(0, Op0.getValueType()), 2153 ISD::SETLT); 2154 SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4); 2155 SDValue CstOffset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), 2156 SignSet, Four, Zero); 2157 2158 // If the sign bit of the integer is set, the large number will be treated 2159 // as a negative number. To counteract this, the dynamic code adds an 2160 // offset depending on the data type. 
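  // Concretely, for an i16 input this tail-end path converts with
  // SINT_TO_FP first and then adds either 0.0 or 2^16, loaded from the
  // constant pool and selected on the integer's sign bit. For example, the
  // unsigned value 0x8000 is first converted as the signed value -32768;
  // adding the 2^16 fudge factor recovers the intended 32768.0.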
2161 uint64_t FF; 2162 switch (Op0.getValueType().getSimpleVT().SimpleTy) { 2163 default: assert(0 && "Unsupported integer type!"); 2164 case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float) 2165 case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float) 2166 case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float) 2167 case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float) 2168 } 2169 if (TLI.isLittleEndian()) FF <<= 32; 2170 Constant *FudgeFactor = ConstantInt::get( 2171 Type::getInt64Ty(*DAG.getContext()), FF); 2172 2173 SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy()); 2174 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 2175 CPIdx = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), CPIdx, CstOffset); 2176 Alignment = std::min(Alignment, 4u); 2177 SDValue FudgeInReg; 2178 if (DestVT == MVT::f32) 2179 FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx, 2180 MachinePointerInfo::getConstantPool(), 2181 false, false, false, Alignment); 2182 else { 2183 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, 2184 DAG.getEntryNode(), CPIdx, 2185 MachinePointerInfo::getConstantPool(), 2186 MVT::f32, false, false, Alignment); 2187 HandleSDNode Handle(Load); 2188 LegalizeOp(Load.getNode()); 2189 FudgeInReg = Handle.getValue(); 2190 } 2191 2192 return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg); 2193} 2194 2195/// PromoteLegalINT_TO_FP - This function is responsible for legalizing a 2196/// *INT_TO_FP operation of the specified operand when the target requests that 2197/// we promote it. At this point, we know that the result and operand types are 2198/// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP 2199/// operation that takes a larger input. 2200SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp, 2201 EVT DestVT, 2202 bool isSigned, 2203 DebugLoc dl) { 2204 // First step, figure out the appropriate *INT_TO_FP operation to use. 2205 EVT NewInTy = LegalOp.getValueType(); 2206 2207 unsigned OpToUse = 0; 2208 2209 // Scan for the appropriate larger type to use. 2210 while (1) { 2211 NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1); 2212 assert(NewInTy.isInteger() && "Ran out of possibilities!"); 2213 2214 // If the target supports SINT_TO_FP of this type, use it. 2215 if (TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, NewInTy)) { 2216 OpToUse = ISD::SINT_TO_FP; 2217 break; 2218 } 2219 if (isSigned) continue; 2220 2221 // If the target supports UINT_TO_FP of this type, use it. 2222 if (TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, NewInTy)) { 2223 OpToUse = ISD::UINT_TO_FP; 2224 break; 2225 } 2226 2227 // Otherwise, try a larger type. 2228 } 2229 2230 // Okay, we found the operation and type to use. Zero extend our input to the 2231 // desired type then run the operation on it. 2232 return DAG.getNode(OpToUse, dl, DestVT, 2233 DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, 2234 dl, NewInTy, LegalOp)); 2235} 2236 2237/// PromoteLegalFP_TO_INT - This function is responsible for legalizing a 2238/// FP_TO_*INT operation of the specified operand when the target requests that 2239/// we promote it. At this point, we know that the result and operand types are 2240/// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT 2241/// operation that returns a larger result. 
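/// Preferring FP_TO_SINT on the wider type is always safe here: the
/// unsigned range of the original result type fits inside the signed range
/// of any strictly wider integer type, so truncating the wider result
/// yields the desired bits for both the signed and the unsigned case.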
2242SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp, 2243 EVT DestVT, 2244 bool isSigned, 2245 DebugLoc dl) { 2246 // First step, figure out the appropriate FP_TO*INT operation to use. 2247 EVT NewOutTy = DestVT; 2248 2249 unsigned OpToUse = 0; 2250 2251 // Scan for the appropriate larger type to use. 2252 while (1) { 2253 NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1); 2254 assert(NewOutTy.isInteger() && "Ran out of possibilities!"); 2255 2256 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) { 2257 OpToUse = ISD::FP_TO_SINT; 2258 break; 2259 } 2260 2261 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) { 2262 OpToUse = ISD::FP_TO_UINT; 2263 break; 2264 } 2265 2266 // Otherwise, try a larger type. 2267 } 2268 2269 2270 // Okay, we found the operation and type to use. 2271 SDValue Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp); 2272 2273 // Truncate the result of the extended FP_TO_*INT operation to the desired 2274 // size. 2275 return DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation); 2276} 2277 2278/// ExpandBSWAP - Open code the operations for BSWAP of the specified operation. 2279/// 2280SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) { 2281 EVT VT = Op.getValueType(); 2282 EVT SHVT = TLI.getShiftAmountTy(VT); 2283 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; 2284 switch (VT.getSimpleVT().SimpleTy) { 2285 default: assert(0 && "Unhandled Expand type in BSWAP!"); 2286 case MVT::i16: 2287 Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2288 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2289 return DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2290 case MVT::i32: 2291 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2292 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2293 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2294 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2295 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(0xFF0000, VT)); 2296 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, VT)); 2297 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2298 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2299 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 2300 case MVT::i64: 2301 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2302 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2303 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2304 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2305 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2306 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2307 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2308 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2309 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, DAG.getConstant(255ULL<<48, VT)); 2310 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, DAG.getConstant(255ULL<<40, VT)); 2311 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, DAG.getConstant(255ULL<<32, VT)); 2312 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, DAG.getConstant(255ULL<<24, VT)); 2313 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(255ULL<<16, VT)); 2314 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(255ULL<<8 , VT)); 2315 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7); 2316 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, 
Tmp5); 2317 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2318 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2319 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6); 2320 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 2321 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4); 2322 } 2323} 2324 2325/// SplatByte - Distribute ByteVal over NumBits bits. 2326// FIXME: Move this helper to a common place. 2327static APInt SplatByte(unsigned NumBits, uint8_t ByteVal) { 2328 APInt Val = APInt(NumBits, ByteVal); 2329 unsigned Shift = 8; 2330 for (unsigned i = NumBits; i > 8; i >>= 1) { 2331 Val = (Val << Shift) | Val; 2332 Shift <<= 1; 2333 } 2334 return Val; 2335} 2336 2337/// ExpandBitCount - Expand the specified bitcount instruction into operations. 2338/// 2339SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op, 2340 DebugLoc dl) { 2341 switch (Opc) { 2342 default: assert(0 && "Cannot expand this yet!"); 2343 case ISD::CTPOP: { 2344 EVT VT = Op.getValueType(); 2345 EVT ShVT = TLI.getShiftAmountTy(VT); 2346 unsigned Len = VT.getSizeInBits(); 2347 2348 assert(VT.isInteger() && Len <= 128 && Len % 8 == 0 && 2349 "CTPOP not implemented for this type."); 2350 2351 // This is the "best" algorithm from 2352 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 2353 2354 SDValue Mask55 = DAG.getConstant(SplatByte(Len, 0x55), VT); 2355 SDValue Mask33 = DAG.getConstant(SplatByte(Len, 0x33), VT); 2356 SDValue Mask0F = DAG.getConstant(SplatByte(Len, 0x0F), VT); 2357 SDValue Mask01 = DAG.getConstant(SplatByte(Len, 0x01), VT); 2358 2359 // v = v - ((v >> 1) & 0x55555555...) 2360 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 2361 DAG.getNode(ISD::AND, dl, VT, 2362 DAG.getNode(ISD::SRL, dl, VT, Op, 2363 DAG.getConstant(1, ShVT)), 2364 Mask55)); 2365 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 2366 Op = DAG.getNode(ISD::ADD, dl, VT, 2367 DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 2368 DAG.getNode(ISD::AND, dl, VT, 2369 DAG.getNode(ISD::SRL, dl, VT, Op, 2370 DAG.getConstant(2, ShVT)), 2371 Mask33)); 2372 // v = (v + (v >> 4)) & 0x0F0F0F0F... 2373 Op = DAG.getNode(ISD::AND, dl, VT, 2374 DAG.getNode(ISD::ADD, dl, VT, Op, 2375 DAG.getNode(ISD::SRL, dl, VT, Op, 2376 DAG.getConstant(4, ShVT))), 2377 Mask0F); 2378 // v = (v * 0x01010101...) >> (Len - 8) 2379 Op = DAG.getNode(ISD::SRL, dl, VT, 2380 DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 2381 DAG.getConstant(Len - 8, ShVT)); 2382 2383 return Op; 2384 } 2385 case ISD::CTLZ_ZERO_UNDEF: 2386 // This trivially expands to CTLZ. 2387 return DAG.getNode(ISD::CTLZ, dl, Op.getValueType(), Op); 2388 case ISD::CTLZ: { 2389 // for now, we do this: 2390 // x = x | (x >> 1); 2391 // x = x | (x >> 2); 2392 // ... 2393 // x = x | (x >>16); 2394 // x = x | (x >>32); // for 64-bit input 2395 // return popcount(~x); 2396 // 2397 // but see also: http://www.hackersdelight.org/HDcode/nlz.cc 2398 EVT VT = Op.getValueType(); 2399 EVT ShVT = TLI.getShiftAmountTy(VT); 2400 unsigned len = VT.getSizeInBits(); 2401 for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { 2402 SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT); 2403 Op = DAG.getNode(ISD::OR, dl, VT, Op, 2404 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3)); 2405 } 2406 Op = DAG.getNOT(dl, Op, VT); 2407 return DAG.getNode(ISD::CTPOP, dl, VT, Op); 2408 } 2409 case ISD::CTTZ_ZERO_UNDEF: 2410 // This trivially expands to CTTZ. 
2411 return DAG.getNode(ISD::CTTZ, dl, Op.getValueType(), Op); 2412 case ISD::CTTZ: { 2413 // for now, we use: { return popcount(~x & (x - 1)); } 2414 // unless the target has ctlz but not ctpop, in which case we use: 2415 // { return 32 - nlz(~x & (x-1)); } 2416 // see also http://www.hackersdelight.org/HDcode/ntz.cc 2417 EVT VT = Op.getValueType(); 2418 SDValue Tmp3 = DAG.getNode(ISD::AND, dl, VT, 2419 DAG.getNOT(dl, Op, VT), 2420 DAG.getNode(ISD::SUB, dl, VT, Op, 2421 DAG.getConstant(1, VT))); 2422 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 2423 if (!TLI.isOperationLegalOrCustom(ISD::CTPOP, VT) && 2424 TLI.isOperationLegalOrCustom(ISD::CTLZ, VT)) 2425 return DAG.getNode(ISD::SUB, dl, VT, 2426 DAG.getConstant(VT.getSizeInBits(), VT), 2427 DAG.getNode(ISD::CTLZ, dl, VT, Tmp3)); 2428 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp3); 2429 } 2430 } 2431} 2432 2433std::pair <SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) { 2434 unsigned Opc = Node->getOpcode(); 2435 MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT(); 2436 RTLIB::Libcall LC; 2437 2438 switch (Opc) { 2439 default: 2440 llvm_unreachable("Unhandled atomic intrinsic Expand!"); 2441 break; 2442 case ISD::ATOMIC_SWAP: 2443 switch (VT.SimpleTy) { 2444 default: llvm_unreachable("Unexpected value type for atomic!"); 2445 case MVT::i8: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_1; break; 2446 case MVT::i16: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_2; break; 2447 case MVT::i32: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_4; break; 2448 case MVT::i64: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_8; break; 2449 } 2450 break; 2451 case ISD::ATOMIC_CMP_SWAP: 2452 switch (VT.SimpleTy) { 2453 default: llvm_unreachable("Unexpected value type for atomic!"); 2454 case MVT::i8: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1; break; 2455 case MVT::i16: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2; break; 2456 case MVT::i32: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4; break; 2457 case MVT::i64: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8; break; 2458 } 2459 break; 2460 case ISD::ATOMIC_LOAD_ADD: 2461 switch (VT.SimpleTy) { 2462 default: llvm_unreachable("Unexpected value type for atomic!"); 2463 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_ADD_1; break; 2464 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_ADD_2; break; 2465 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_ADD_4; break; 2466 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_ADD_8; break; 2467 } 2468 break; 2469 case ISD::ATOMIC_LOAD_SUB: 2470 switch (VT.SimpleTy) { 2471 default: llvm_unreachable("Unexpected value type for atomic!"); 2472 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_SUB_1; break; 2473 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_SUB_2; break; 2474 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_SUB_4; break; 2475 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_SUB_8; break; 2476 } 2477 break; 2478 case ISD::ATOMIC_LOAD_AND: 2479 switch (VT.SimpleTy) { 2480 default: llvm_unreachable("Unexpected value type for atomic!"); 2481 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_AND_1; break; 2482 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_AND_2; break; 2483 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_AND_4; break; 2484 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_AND_8; break; 2485 } 2486 break; 2487 case ISD::ATOMIC_LOAD_OR: 2488 switch (VT.SimpleTy) { 2489 default: llvm_unreachable("Unexpected value type for atomic!"); 2490 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_OR_1; break; 2491 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_OR_2; break; 2492 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_OR_4; break; 2493 case MVT::i64: LC = 
RTLIB::SYNC_FETCH_AND_OR_8; break; 2494 } 2495 break; 2496 case ISD::ATOMIC_LOAD_XOR: 2497 switch (VT.SimpleTy) { 2498 default: llvm_unreachable("Unexpected value type for atomic!"); 2499 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_XOR_1; break; 2500 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_XOR_2; break; 2501 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_XOR_4; break; 2502 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_XOR_8; break; 2503 } 2504 break; 2505 case ISD::ATOMIC_LOAD_NAND: 2506 switch (VT.SimpleTy) { 2507 default: llvm_unreachable("Unexpected value type for atomic!"); 2508 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_NAND_1; break; 2509 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_NAND_2; break; 2510 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_NAND_4; break; 2511 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_NAND_8; break; 2512 } 2513 break; 2514 } 2515 2516 return ExpandChainLibCall(LC, Node, false); 2517} 2518 2519void SelectionDAGLegalize::ExpandNode(SDNode *Node) { 2520 SmallVector<SDValue, 8> Results; 2521 DebugLoc dl = Node->getDebugLoc(); 2522 SDValue Tmp1, Tmp2, Tmp3, Tmp4; 2523 switch (Node->getOpcode()) { 2524 case ISD::CTPOP: 2525 case ISD::CTLZ: 2526 case ISD::CTLZ_ZERO_UNDEF: 2527 case ISD::CTTZ: 2528 case ISD::CTTZ_ZERO_UNDEF: 2529 Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl); 2530 Results.push_back(Tmp1); 2531 break; 2532 case ISD::BSWAP: 2533 Results.push_back(ExpandBSWAP(Node->getOperand(0), dl)); 2534 break; 2535 case ISD::FRAMEADDR: 2536 case ISD::RETURNADDR: 2537 case ISD::FRAME_TO_ARGS_OFFSET: 2538 Results.push_back(DAG.getConstant(0, Node->getValueType(0))); 2539 break; 2540 case ISD::FLT_ROUNDS_: 2541 Results.push_back(DAG.getConstant(1, Node->getValueType(0))); 2542 break; 2543 case ISD::EH_RETURN: 2544 case ISD::EH_LABEL: 2545 case ISD::PREFETCH: 2546 case ISD::VAEND: 2547 case ISD::EH_SJLJ_LONGJMP: 2548 // If the target didn't expand these, there's nothing to do, so just 2549 // preserve the chain and be done. 2550 Results.push_back(Node->getOperand(0)); 2551 break; 2552 case ISD::EH_SJLJ_SETJMP: 2553 // If the target didn't expand this, just return 'zero' and preserve the 2554 // chain. 2555 Results.push_back(DAG.getConstant(0, MVT::i32)); 2556 Results.push_back(Node->getOperand(0)); 2557 break; 2558 case ISD::ATOMIC_FENCE: 2559 case ISD::MEMBARRIER: { 2560 // If the target didn't lower this, lower it to '__sync_synchronize()' call 2561 // FIXME: handle "fence singlethread" more efficiently. 2562 TargetLowering::ArgListTy Args; 2563 std::pair<SDValue, SDValue> CallResult = 2564 TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()), 2565 false, false, false, false, 0, CallingConv::C, 2566 /*isTailCall=*/false, 2567 /*isReturnValueUsed=*/true, 2568 DAG.getExternalSymbol("__sync_synchronize", 2569 TLI.getPointerTy()), 2570 Args, DAG, dl); 2571 Results.push_back(CallResult.second); 2572 break; 2573 } 2574 case ISD::ATOMIC_LOAD: { 2575 // There is no libcall for atomic load; fake it with ATOMIC_CMP_SWAP. 
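    // A compare-and-swap with both the comparison value and the new value
    // equal to zero returns the current contents without changing them: it
    // only stores when the location already holds zero, and then it stores
    // zero back, which is a no-op. That preserves atomicity while reusing
    // an operation the target (or its libcall) already handles.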
2576 SDValue Zero = DAG.getConstant(0, Node->getValueType(0)); 2577 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, 2578 cast<AtomicSDNode>(Node)->getMemoryVT(), 2579 Node->getOperand(0), 2580 Node->getOperand(1), Zero, Zero, 2581 cast<AtomicSDNode>(Node)->getMemOperand(), 2582 cast<AtomicSDNode>(Node)->getOrdering(), 2583 cast<AtomicSDNode>(Node)->getSynchScope()); 2584 Results.push_back(Swap.getValue(0)); 2585 Results.push_back(Swap.getValue(1)); 2586 break; 2587 } 2588 case ISD::ATOMIC_STORE: { 2589 // There is no libcall for atomic store; fake it with ATOMIC_SWAP. 2590 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, 2591 cast<AtomicSDNode>(Node)->getMemoryVT(), 2592 Node->getOperand(0), 2593 Node->getOperand(1), Node->getOperand(2), 2594 cast<AtomicSDNode>(Node)->getMemOperand(), 2595 cast<AtomicSDNode>(Node)->getOrdering(), 2596 cast<AtomicSDNode>(Node)->getSynchScope()); 2597 Results.push_back(Swap.getValue(1)); 2598 break; 2599 } 2600 // By default, atomic intrinsics are marked Legal and lowered. Targets 2601 // which don't support them directly, however, may want libcalls, in which 2602 // case they mark them Expand, and we get here. 2603 case ISD::ATOMIC_SWAP: 2604 case ISD::ATOMIC_LOAD_ADD: 2605 case ISD::ATOMIC_LOAD_SUB: 2606 case ISD::ATOMIC_LOAD_AND: 2607 case ISD::ATOMIC_LOAD_OR: 2608 case ISD::ATOMIC_LOAD_XOR: 2609 case ISD::ATOMIC_LOAD_NAND: 2610 case ISD::ATOMIC_LOAD_MIN: 2611 case ISD::ATOMIC_LOAD_MAX: 2612 case ISD::ATOMIC_LOAD_UMIN: 2613 case ISD::ATOMIC_LOAD_UMAX: 2614 case ISD::ATOMIC_CMP_SWAP: { 2615 std::pair<SDValue, SDValue> Tmp = ExpandAtomic(Node); 2616 Results.push_back(Tmp.first); 2617 Results.push_back(Tmp.second); 2618 break; 2619 } 2620 case ISD::DYNAMIC_STACKALLOC: 2621 ExpandDYNAMIC_STACKALLOC(Node, Results); 2622 break; 2623 case ISD::MERGE_VALUES: 2624 for (unsigned i = 0; i < Node->getNumValues(); i++) 2625 Results.push_back(Node->getOperand(i)); 2626 break; 2627 case ISD::UNDEF: { 2628 EVT VT = Node->getValueType(0); 2629 if (VT.isInteger()) 2630 Results.push_back(DAG.getConstant(0, VT)); 2631 else { 2632 assert(VT.isFloatingPoint() && "Unknown value type!"); 2633 Results.push_back(DAG.getConstantFP(0, VT)); 2634 } 2635 break; 2636 } 2637 case ISD::TRAP: { 2638 // If this operation is not supported, lower it to 'abort()' call 2639 TargetLowering::ArgListTy Args; 2640 std::pair<SDValue, SDValue> CallResult = 2641 TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()), 2642 false, false, false, false, 0, CallingConv::C, 2643 /*isTailCall=*/false, 2644 /*isReturnValueUsed=*/true, 2645 DAG.getExternalSymbol("abort", TLI.getPointerTy()), 2646 Args, DAG, dl); 2647 Results.push_back(CallResult.second); 2648 break; 2649 } 2650 case ISD::FP_ROUND: 2651 case ISD::BITCAST: 2652 Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0), 2653 Node->getValueType(0), dl); 2654 Results.push_back(Tmp1); 2655 break; 2656 case ISD::FP_EXTEND: 2657 Tmp1 = EmitStackConvert(Node->getOperand(0), 2658 Node->getOperand(0).getValueType(), 2659 Node->getValueType(0), dl); 2660 Results.push_back(Tmp1); 2661 break; 2662 case ISD::SIGN_EXTEND_INREG: { 2663 // NOTE: we could fall back on load/store here too for targets without 2664 // SAR. However, it is doubtful that any exist. 
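    // The expansion shifts the value left so the desired sign bit lands in
    // the MSB, then arithmetic-shifts it back down. E.g. sign-extending the
    // low 8 bits of an i32 x is, as a scalar illustration only:
    //   int32_t Ext = (int32_t)(x << 24) >> 24;   // x is a uint32_t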
2665 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 2666 EVT VT = Node->getValueType(0); 2667 EVT ShiftAmountTy = TLI.getShiftAmountTy(VT); 2668 if (VT.isVector()) 2669 ShiftAmountTy = VT; 2670 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 2671 ExtraVT.getScalarType().getSizeInBits(); 2672 SDValue ShiftCst = DAG.getConstant(BitsDiff, ShiftAmountTy); 2673 Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0), 2674 Node->getOperand(0), ShiftCst); 2675 Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst); 2676 Results.push_back(Tmp1); 2677 break; 2678 } 2679 case ISD::FP_ROUND_INREG: { 2680 // The only way we can lower this is to turn it into a TRUNCSTORE, 2681 // EXTLOAD pair, targeting a temporary location (a stack slot). 2682 2683 // NOTE: there is a choice here between constantly creating new stack 2684 // slots and always reusing the same one. We currently always create 2685 // new ones, as reuse may inhibit scheduling. 2686 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 2687 Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT, 2688 Node->getValueType(0), dl); 2689 Results.push_back(Tmp1); 2690 break; 2691 } 2692 case ISD::SINT_TO_FP: 2693 case ISD::UINT_TO_FP: 2694 Tmp1 = ExpandLegalINT_TO_FP(Node->getOpcode() == ISD::SINT_TO_FP, 2695 Node->getOperand(0), Node->getValueType(0), dl); 2696 Results.push_back(Tmp1); 2697 break; 2698 case ISD::FP_TO_UINT: { 2699 SDValue True, False; 2700 EVT VT = Node->getOperand(0).getValueType(); 2701 EVT NVT = Node->getValueType(0); 2702 APFloat apf(APInt::getNullValue(VT.getSizeInBits())); 2703 APInt x = APInt::getSignBit(NVT.getSizeInBits()); 2704 (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven); 2705 Tmp1 = DAG.getConstantFP(apf, VT); 2706 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), 2707 Node->getOperand(0), 2708 Tmp1, ISD::SETLT); 2709 True = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, Node->getOperand(0)); 2710 False = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, 2711 DAG.getNode(ISD::FSUB, dl, VT, 2712 Node->getOperand(0), Tmp1)); 2713 False = DAG.getNode(ISD::XOR, dl, NVT, False, 2714 DAG.getConstant(x, NVT)); 2715 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, True, False); 2716 Results.push_back(Tmp1); 2717 break; 2718 } 2719 case ISD::VAARG: { 2720 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2721 EVT VT = Node->getValueType(0); 2722 Tmp1 = Node->getOperand(0); 2723 Tmp2 = Node->getOperand(1); 2724 unsigned Align = Node->getConstantOperandVal(3); 2725 2726 SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, 2727 MachinePointerInfo(V), 2728 false, false, false, 0); 2729 SDValue VAList = VAListLoad; 2730 2731 if (Align > TLI.getMinStackArgumentAlignment()) { 2732 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); 2733 2734 VAList = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList, 2735 DAG.getConstant(Align - 1, 2736 TLI.getPointerTy())); 2737 2738 VAList = DAG.getNode(ISD::AND, dl, TLI.getPointerTy(), VAList, 2739 DAG.getConstant(-(int64_t)Align, 2740 TLI.getPointerTy())); 2741 } 2742 2743 // Increment the pointer, VAList, to the next vaarg 2744 Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList, 2745 DAG.getConstant(TLI.getTargetData()-> 2746 getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())), 2747 TLI.getPointerTy())); 2748 // Store the incremented VAList to the legalized pointer 2749 Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2, 2750 MachinePointerInfo(V), false, false, 0); 
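    // Note that the argument itself is loaded through VAList, the (possibly
    // realigned) pre-increment pointer; chaining the load on the store above
    // keeps the pointer update and the argument read properly ordered.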
2751 // Load the actual argument out of the pointer VAList 2752 Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(), 2753 false, false, false, 0)); 2754 Results.push_back(Results[0].getValue(1)); 2755 break; 2756 } 2757 case ISD::VACOPY: { 2758 // This defaults to loading a pointer from the input and storing it to the 2759 // output, returning the chain. 2760 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 2761 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 2762 Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0), 2763 Node->getOperand(2), MachinePointerInfo(VS), 2764 false, false, false, 0); 2765 Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 2766 MachinePointerInfo(VD), false, false, 0); 2767 Results.push_back(Tmp1); 2768 break; 2769 } 2770 case ISD::EXTRACT_VECTOR_ELT: 2771 if (Node->getOperand(0).getValueType().getVectorNumElements() == 1) 2772 // This must be an access of the only element. Return it. 2773 Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), 2774 Node->getOperand(0)); 2775 else 2776 Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0)); 2777 Results.push_back(Tmp1); 2778 break; 2779 case ISD::EXTRACT_SUBVECTOR: 2780 Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0))); 2781 break; 2782 case ISD::INSERT_SUBVECTOR: 2783 Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0))); 2784 break; 2785 case ISD::CONCAT_VECTORS: { 2786 Results.push_back(ExpandVectorBuildThroughStack(Node)); 2787 break; 2788 } 2789 case ISD::SCALAR_TO_VECTOR: 2790 Results.push_back(ExpandSCALAR_TO_VECTOR(Node)); 2791 break; 2792 case ISD::INSERT_VECTOR_ELT: 2793 Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0), 2794 Node->getOperand(1), 2795 Node->getOperand(2), dl)); 2796 break; 2797 case ISD::VECTOR_SHUFFLE: { 2798 SmallVector<int, 8> Mask; 2799 cast<ShuffleVectorSDNode>(Node)->getMask(Mask); 2800 2801 EVT VT = Node->getValueType(0); 2802 EVT EltVT = VT.getVectorElementType(); 2803 if (!TLI.isTypeLegal(EltVT)) 2804 EltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT); 2805 unsigned NumElems = VT.getVectorNumElements(); 2806 SmallVector<SDValue, 8> Ops; 2807 for (unsigned i = 0; i != NumElems; ++i) { 2808 if (Mask[i] < 0) { 2809 Ops.push_back(DAG.getUNDEF(EltVT)); 2810 continue; 2811 } 2812 unsigned Idx = Mask[i]; 2813 if (Idx < NumElems) 2814 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 2815 Node->getOperand(0), 2816 DAG.getIntPtrConstant(Idx))); 2817 else 2818 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 2819 Node->getOperand(1), 2820 DAG.getIntPtrConstant(Idx - NumElems))); 2821 } 2822 Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size()); 2823 Results.push_back(Tmp1); 2824 break; 2825 } 2826 case ISD::EXTRACT_ELEMENT: { 2827 EVT OpTy = Node->getOperand(0).getValueType(); 2828 if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) { 2829 // 1 -> Hi 2830 Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0), 2831 DAG.getConstant(OpTy.getSizeInBits()/2, 2832 TLI.getShiftAmountTy(Node->getOperand(0).getValueType()))); 2833 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1); 2834 } else { 2835 // 0 -> Lo 2836 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), 2837 Node->getOperand(0)); 2838 } 2839 Results.push_back(Tmp1); 2840 break; 2841 } 2842 case ISD::STACKSAVE: 2843 // Expand to CopyFromReg if the target set 2844 // 
  case ISD::STACKSAVE:
    // Expand to CopyFromReg if the target set
    // StackPointerRegisterToSaveRestore.
    if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
      Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP,
                                           Node->getValueType(0)));
      Results.push_back(Results[0].getValue(1));
    } else {
      Results.push_back(DAG.getUNDEF(Node->getValueType(0)));
      Results.push_back(Node->getOperand(0));
    }
    break;
  case ISD::STACKRESTORE:
    // Expand to CopyToReg if the target set
    // StackPointerRegisterToSaveRestore.
    if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
      Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP,
                                         Node->getOperand(1)));
    } else {
      Results.push_back(Node->getOperand(0));
    }
    break;
  case ISD::FCOPYSIGN:
    Results.push_back(ExpandFCOPYSIGN(Node));
    break;
  case ISD::FNEG:
    // Expand Y = FNEG(X) ->  Y = SUB -0.0, X
    Tmp1 = DAG.getConstantFP(-0.0, Node->getValueType(0));
    Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1,
                       Node->getOperand(0));
    Results.push_back(Tmp1);
    break;
  case ISD::FABS: {
    // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X).
    EVT VT = Node->getValueType(0);
    Tmp1 = Node->getOperand(0);
    Tmp2 = DAG.getConstantFP(0.0, VT);
    Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(Tmp1.getValueType()),
                        Tmp1, Tmp2, ISD::SETUGT);
    Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1);
    Tmp1 = DAG.getNode(ISD::SELECT, dl, VT, Tmp2, Tmp1, Tmp3);
    Results.push_back(Tmp1);
    break;
  }
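  // The remaining FP operations have no inline expansion here; each one is
  // lowered to the runtime-library routine that matches the value type
  // (F32, F64, F80 or PPCF128).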
  case ISD::FSQRT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64,
                                      RTLIB::SQRT_F80, RTLIB::SQRT_PPCF128));
    break;
  case ISD::FSIN:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64,
                                      RTLIB::SIN_F80, RTLIB::SIN_PPCF128));
    break;
  case ISD::FCOS:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64,
                                      RTLIB::COS_F80, RTLIB::COS_PPCF128));
    break;
  case ISD::FLOG:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64,
                                      RTLIB::LOG_F80, RTLIB::LOG_PPCF128));
    break;
  case ISD::FLOG2:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64,
                                      RTLIB::LOG2_F80, RTLIB::LOG2_PPCF128));
    break;
  case ISD::FLOG10:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64,
                                      RTLIB::LOG10_F80, RTLIB::LOG10_PPCF128));
    break;
  case ISD::FEXP:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64,
                                      RTLIB::EXP_F80, RTLIB::EXP_PPCF128));
    break;
  case ISD::FEXP2:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64,
                                      RTLIB::EXP2_F80, RTLIB::EXP2_PPCF128));
    break;
  case ISD::FTRUNC:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64,
                                      RTLIB::TRUNC_F80, RTLIB::TRUNC_PPCF128));
    break;
  case ISD::FFLOOR:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64,
                                      RTLIB::FLOOR_F80, RTLIB::FLOOR_PPCF128));
    break;
  case ISD::FCEIL:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64,
                                      RTLIB::CEIL_F80, RTLIB::CEIL_PPCF128));
    break;
  case ISD::FRINT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64,
                                      RTLIB::RINT_F80, RTLIB::RINT_PPCF128));
    break;
  case ISD::FNEARBYINT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32,
                                      RTLIB::NEARBYINT_F64,
                                      RTLIB::NEARBYINT_F80,
                                      RTLIB::NEARBYINT_PPCF128));
    break;
  case ISD::FPOWI:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64,
                                      RTLIB::POWI_F80, RTLIB::POWI_PPCF128));
    break;
  case ISD::FPOW:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64,
                                      RTLIB::POW_F80, RTLIB::POW_PPCF128));
    break;
  case ISD::FDIV:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64,
                                      RTLIB::DIV_F80, RTLIB::DIV_PPCF128));
    break;
  case ISD::FREM:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64,
                                      RTLIB::REM_F80, RTLIB::REM_PPCF128));
    break;
  case ISD::FMA:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::FMA_F32, RTLIB::FMA_F64,
                                      RTLIB::FMA_F80, RTLIB::FMA_PPCF128));
    break;
  case ISD::FP16_TO_FP32:
    Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false));
    break;
  case ISD::FP32_TO_FP16:
    Results.push_back(ExpandLibCall(RTLIB::FPROUND_F32_F16, Node, false));
    break;
  case ISD::ConstantFP: {
    ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
    // Check to see if this FP immediate is already legal.
    // If this is a legal constant, turn it into a TargetConstantFP node.
    if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0)))
      Results.push_back(ExpandConstantFP(CFP, true));
    break;
  }
  case ISD::EHSELECTION: {
    unsigned Reg = TLI.getExceptionSelectorRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(1), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::EXCEPTIONADDR: {
    unsigned Reg = TLI.getExceptionAddressRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::SUB: {
    EVT VT = Node->getValueType(0);
    assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
           TLI.isOperationLegalOrCustom(ISD::XOR, VT) &&
           "Don't know how to expand this subtraction!");
    // Expand a - b as a + (~b + 1), i.e. a plus the two's complement of b.
    Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1),
               DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT));
    Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, VT));
    Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1));
    break;
  }
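  // Integer remainder: prefer a combined DIVREM node (or the combined divrem
  // libcall when it is available and profitable), then fall back to
  // X - (X/Y)*Y when plain division is legal, and finally to the dedicated
  // remainder libcall.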
  case ISD::UREM:
  case ISD::SREM: {
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    bool isSigned = Node->getOpcode() == ISD::SREM;
    unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
    unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
    Tmp2 = Node->getOperand(0);
    Tmp3 = Node->getOperand(1);
    if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
        (isDivRemLibcallAvailable(Node, isSigned, TLI) &&
         UseDivRem(Node, isSigned, false))) {
      Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1);
    } else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) {
      // X % Y -> X-X/Y*Y
      Tmp1 = DAG.getNode(DivOpc, dl, VT, Tmp2, Tmp3);
      Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Tmp3);
      Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp1);
    } else if (isSigned)
      Tmp1 = ExpandIntLibCall(Node, true,
                              RTLIB::SREM_I8,
                              RTLIB::SREM_I16, RTLIB::SREM_I32,
                              RTLIB::SREM_I64, RTLIB::SREM_I128);
    else
      Tmp1 = ExpandIntLibCall(Node, false,
                              RTLIB::UREM_I8,
                              RTLIB::UREM_I16, RTLIB::UREM_I32,
                              RTLIB::UREM_I64, RTLIB::UREM_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::UDIV:
  case ISD::SDIV: {
    bool isSigned = Node->getOpcode() == ISD::SDIV;
    unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
        (isDivRemLibcallAvailable(Node, isSigned, TLI) &&
         UseDivRem(Node, isSigned, true)))
      Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0),
                         Node->getOperand(1));
    else if (isSigned)
      Tmp1 = ExpandIntLibCall(Node, true,
                              RTLIB::SDIV_I8,
                              RTLIB::SDIV_I16, RTLIB::SDIV_I32,
                              RTLIB::SDIV_I64, RTLIB::SDIV_I128);
    else
      Tmp1 = ExpandIntLibCall(Node, false,
                              RTLIB::UDIV_I8,
                              RTLIB::UDIV_I16, RTLIB::UDIV_I32,
                              RTLIB::UDIV_I64, RTLIB::UDIV_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::MULHU:
  case ISD::MULHS: {
    unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI :
                                                              ISD::SMUL_LOHI;
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) &&
           "If this wasn't legal, it shouldn't have been created!");
    Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0),
                       Node->getOperand(1));
    Results.push_back(Tmp1.getValue(1));
    break;
  }
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    // Expand into divrem libcall
    ExpandDivRemLibCall(Node, Results);
    break;
  case ISD::MUL: {
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    // See if multiply or divide can be lowered using two-result operations.
    // We just need the low half of the multiply; try both the signed
    // and unsigned forms.  If the target supports both SMUL_LOHI and
    // UMUL_LOHI, form a preference by checking which forms of plain
    // MULH it supports.
    bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT);
    bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT);
    bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT);
    bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT);
    unsigned OpToUse = 0;
    if (HasSMUL_LOHI && !HasMULHS) {
      OpToUse = ISD::SMUL_LOHI;
    } else if (HasUMUL_LOHI && !HasMULHU) {
      OpToUse = ISD::UMUL_LOHI;
    } else if (HasSMUL_LOHI) {
      OpToUse = ISD::SMUL_LOHI;
    } else if (HasUMUL_LOHI) {
      OpToUse = ISD::UMUL_LOHI;
    }
    if (OpToUse) {
      Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0),
                                    Node->getOperand(1)));
      break;
    }
    Tmp1 = ExpandIntLibCall(Node, false,
                            RTLIB::MUL_I8,
                            RTLIB::MUL_I16, RTLIB::MUL_I32,
                            RTLIB::MUL_I64, RTLIB::MUL_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SADDO:
  case ISD::SSUBO: {
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
                              ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
                              LHS, RHS);
    Results.push_back(Sum);
    EVT OType = Node->getValueType(1);

    SDValue Zero = DAG.getConstant(0, LHS.getValueType());

    // LHSSign -> LHS >= 0
    // RHSSign -> RHS >= 0
    // SumSign -> Sum >= 0
    //
    // Add:
    // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
    // Sub:
    // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
    //
    SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE);
    SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE);
    SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign,
                                      Node->getOpcode() == ISD::SADDO ?
                                      ISD::SETEQ : ISD::SETNE);

    SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE);
    SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE);

    SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE);
    Results.push_back(Cmp);
    break;
  }
  case ISD::UADDO:
  case ISD::USUBO: {
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::UADDO ?
                              ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
                              LHS, RHS);
    Results.push_back(Sum);
    // Unsigned add overflows iff the sum wrapped below LHS; unsigned sub
    // overflows (borrows) iff the difference is greater than LHS.
    Results.push_back(DAG.getSetCC(dl, Node->getValueType(1), Sum, LHS,
                                   Node->getOpcode() == ISD::UADDO ?
                                   ISD::SETULT : ISD::SETUGT));
    break;
  }
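  // [US]MULO is expanded by computing the full product: prefer a MULH* node,
  // then a [SU]MUL_LOHI pair, then a widening MUL in a legal type twice as
  // wide, and as a last resort a wide MUL libcall.  Overflow is then detected
  // by checking whether the high half is the sign/zero-extension of the low
  // half.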
  case ISD::UMULO:
  case ISD::SMULO: {
    EVT VT = Node->getValueType(0);
    EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue BottomHalf;
    SDValue TopHalf;
    static const unsigned Ops[2][3] =
      { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
        { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
    bool isSigned = Node->getOpcode() == ISD::SMULO;
    if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
      BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
      TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
    } else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
      BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
                               RHS);
      TopHalf = BottomHalf.getValue(1);
    } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(),
                                                 VT.getSizeInBits() * 2))) {
      LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
      RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
      Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
      BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
                               DAG.getIntPtrConstant(0));
      TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
                            DAG.getIntPtrConstant(1));
    } else {
      // We can fall back to a libcall with an illegal type for the MUL if we
      // have a libcall big enough.
      // Also, we can fall back to a division in some cases, but that's a big
      // performance hit in the general case.
      RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
      if (WideVT == MVT::i16)
        LC = RTLIB::MUL_I16;
      else if (WideVT == MVT::i32)
        LC = RTLIB::MUL_I32;
      else if (WideVT == MVT::i64)
        LC = RTLIB::MUL_I64;
      else if (WideVT == MVT::i128)
        LC = RTLIB::MUL_I128;
      assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");

      // The high part is obtained by SRA'ing all but one of the bits of low
      // part.
      unsigned LoSize = VT.getSizeInBits();
      SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, RHS,
                                  DAG.getConstant(LoSize-1,
                                                  TLI.getPointerTy()));
      SDValue HiRHS = DAG.getNode(ISD::SRA, dl, VT, LHS,
                                  DAG.getConstant(LoSize-1,
                                                  TLI.getPointerTy()));

      // Here we're passing the 2 arguments explicitly as 4 arguments that are
      // pre-lowered to the correct types. This all depends upon WideVT not
      // being a legal type for the architecture and thus has to be split to
      // two arguments.
      SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
      SDValue Ret = ExpandLibCall(LC, WideVT, Args, 4, isSigned, dl);
      BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
                               DAG.getIntPtrConstant(0));
      TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
                            DAG.getIntPtrConstant(1));
      // Ret is a node with an illegal type. Because such things are not
      // generally permitted during this phase of legalization, delete the
      // node. The above EXTRACT_ELEMENT nodes should have been folded.
      DAG.DeleteNode(Ret.getNode());
    }

    if (isSigned) {
      Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1,
                             TLI.getShiftAmountTy(BottomHalf.getValueType()));
      Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1);
      TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1,
                             ISD::SETNE);
    } else {
      TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf,
                             DAG.getConstant(0, VT), ISD::SETNE);
    }
    Results.push_back(BottomHalf);
    Results.push_back(TopHalf);
    break;
  }
  case ISD::BUILD_PAIR: {
    EVT PairTy = Node->getValueType(0);
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1));
    Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2,
                       DAG.getConstant(PairTy.getSizeInBits()/2,
                                       TLI.getShiftAmountTy(PairTy)));
    Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2));
    break;
  }
  case ISD::SELECT:
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    Tmp3 = Node->getOperand(2);
    if (Tmp1.getOpcode() == ISD::SETCC) {
      Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1),
                             Tmp2, Tmp3,
                             cast<CondCodeSDNode>(Tmp1.getOperand(2))->get());
    } else {
      Tmp1 = DAG.getSelectCC(dl, Tmp1,
                             DAG.getConstant(0, Tmp1.getValueType()),
                             Tmp2, Tmp3, ISD::SETNE);
    }
    Results.push_back(Tmp1);
    break;
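  // BR_JT is lowered manually: scale the index by the jump-table entry size,
  // add it to the table address, load the (possibly narrower) entry with a
  // sign-extending load, add the PIC relocation base if needed, and branch
  // indirectly through the result.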
  case ISD::BR_JT: {
    SDValue Chain = Node->getOperand(0);
    SDValue Table = Node->getOperand(1);
    SDValue Index = Node->getOperand(2);

    EVT PTy = TLI.getPointerTy();

    const TargetData &TD = *TLI.getTargetData();
    unsigned EntrySize =
      DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);

    Index = DAG.getNode(ISD::MUL, dl, PTy,
                        Index, DAG.getConstant(EntrySize, PTy));
    SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);

    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
    SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr,
                                MachinePointerInfo::getJumpTable(), MemVT,
                                false, false, 0);
    Addr = LD;
    if (TM.getRelocationModel() == Reloc::PIC_) {
      // For PIC, the sequence is:
      // BRIND(load(Jumptable + index) + RelocBase)
      // RelocBase can be JumpTable, GOT or some sort of global base.
      Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr,
                         TLI.getPICJumpTableRelocBase(Table, DAG));
    }
    Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::BRCOND:
    // Expand brcond's setcc into its constituent parts and create a BR_CC
    // Node.
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    if (Tmp2.getOpcode() == ISD::SETCC) {
      Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other,
                         Tmp1, Tmp2.getOperand(2),
                         Tmp2.getOperand(0), Tmp2.getOperand(1),
                         Node->getOperand(2));
    } else {
      // We test only the i1 bit.  Skip the AND if UNDEF.
      Tmp3 = (Tmp2.getOpcode() == ISD::UNDEF) ? Tmp2 :
             DAG.getNode(ISD::AND, dl, Tmp2.getValueType(), Tmp2,
                         DAG.getConstant(1, Tmp2.getValueType()));
      Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1,
                         DAG.getCondCode(ISD::SETNE), Tmp3,
                         DAG.getConstant(0, Tmp3.getValueType()),
                         Node->getOperand(2));
    }
    Results.push_back(Tmp1);
    break;
  case ISD::SETCC: {
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    Tmp3 = Node->getOperand(2);
    LegalizeSetCCCondCode(Node->getValueType(0), Tmp1, Tmp2, Tmp3, dl);

    // If we expanded the SETCC into an AND/OR, return the new node
    if (Tmp2.getNode() == 0) {
      Results.push_back(Tmp1);
      break;
    }

    // Otherwise, SETCC for the given comparison type must be completely
    // illegal; expand it into a SELECT_CC.
    EVT VT = Node->getValueType(0);
    Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2,
                       DAG.getConstant(1, VT), DAG.getConstant(0, VT), Tmp3);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SELECT_CC: {
    Tmp1 = Node->getOperand(0);   // LHS
    Tmp2 = Node->getOperand(1);   // RHS
    Tmp3 = Node->getOperand(2);   // True
    Tmp4 = Node->getOperand(3);   // False
    SDValue CC = Node->getOperand(4);

    LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp1.getValueType()),
                          Tmp1, Tmp2, CC, dl);

    assert(!Tmp2.getNode() && "Can't legalize SELECT_CC with legal condition!");
    Tmp2 = DAG.getConstant(0, Tmp1.getValueType());
    CC = DAG.getCondCode(ISD::SETNE);
    Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, Tmp2,
                       Tmp3, Tmp4, CC);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::BR_CC: {
    Tmp1 = Node->getOperand(0);   // Chain
    Tmp2 = Node->getOperand(2);   // LHS
    Tmp3 = Node->getOperand(3);   // RHS
    Tmp4 = Node->getOperand(1);   // CC

    LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp2.getValueType()),
                          Tmp2, Tmp3, Tmp4, dl);

    assert(!Tmp3.getNode() && "Can't legalize BR_CC with legal condition!");
    Tmp3 = DAG.getConstant(0, Tmp2.getValueType());
    Tmp4 = DAG.getCondCode(ISD::SETNE);
    Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4, Tmp2,
                       Tmp3, Node->getOperand(4));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::BUILD_VECTOR:
    Results.push_back(ExpandBUILD_VECTOR(Node));
    break;
  case ISD::SRA:
  case ISD::SRL:
  case ISD::SHL: {
    // Scalarize vector SRA/SRL/SHL.
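    // The shift is not legal as a vector operation, so each element is
    // extracted, shifted as a scalar, and the results are reassembled with a
    // BUILD_VECTOR.  The element type must already be legal for this to work.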
    EVT VT = Node->getValueType(0);
    assert(VT.isVector() && "Unable to legalize non-vector shift");
    assert(TLI.isTypeLegal(VT.getScalarType()) && "Element type must be legal");
    unsigned NumElem = VT.getVectorNumElements();

    SmallVector<SDValue, 8> Scalars;
    for (unsigned Idx = 0; Idx < NumElem; Idx++) {
      SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                               VT.getScalarType(),
                               Node->getOperand(0), DAG.getIntPtrConstant(Idx));
      SDValue Sh = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                               VT.getScalarType(),
                               Node->getOperand(1), DAG.getIntPtrConstant(Idx));
      Scalars.push_back(DAG.getNode(Node->getOpcode(), dl,
                                    VT.getScalarType(), Ex, Sh));
    }
    SDValue Result =
      DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0),
                  &Scalars[0], Scalars.size());
    ReplaceNode(SDValue(Node, 0), Result);
    break;
  }
  case ISD::GLOBAL_OFFSET_TABLE:
  case ISD::GlobalAddress:
  case ISD::GlobalTLSAddress:
  case ISD::ExternalSymbol:
  case ISD::ConstantPool:
  case ISD::JumpTable:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID:
    // FIXME: Custom lowering for these operations shouldn't return null!
    break;
  }

  // Replace the original node with the legalized result.
  if (!Results.empty())
    ReplaceNode(Node, Results.data());
}

void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
  SmallVector<SDValue, 8> Results;
  EVT OVT = Node->getValueType(0);
  if (Node->getOpcode() == ISD::UINT_TO_FP ||
      Node->getOpcode() == ISD::SINT_TO_FP ||
      Node->getOpcode() == ISD::SETCC) {
    OVT = Node->getOperand(0).getValueType();
  }
  EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
  DebugLoc dl = Node->getDebugLoc();
  SDValue Tmp1, Tmp2, Tmp3;
  switch (Node->getOpcode()) {
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTPOP:
    // Zero extend the argument.
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
    // Perform the larger operation. For CTPOP and CTTZ_ZERO_UNDEF, this is
    // already the correct result.
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
    if (Node->getOpcode() == ISD::CTTZ) {
      // FIXME: This should set a bit in the zero extended value instead.
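      // A zero input to CTTZ on the promoted type yields NVT's bit width, so
      // detect that case and remap the answer to the original type's width.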
      Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT),
                          Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT),
                          ISD::SETEQ);
      Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2,
                         DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1);
    } else if (Node->getOpcode() == ISD::CTLZ ||
               Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF) {
      // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
      Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1,
                         DAG.getConstant(NVT.getSizeInBits() -
                                         OVT.getSizeInBits(), NVT));
    }
    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1));
    break;
  case ISD::BSWAP: {
    unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
    Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1);
    Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1,
                       DAG.getConstant(DiffBits, TLI.getShiftAmountTy(NVT)));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:
    Tmp1 = PromoteLegalFP_TO_INT(Node->getOperand(0), Node->getValueType(0),
                                 Node->getOpcode() == ISD::FP_TO_SINT, dl);
    Results.push_back(Tmp1);
    break;
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
    Tmp1 = PromoteLegalINT_TO_FP(Node->getOperand(0), Node->getValueType(0),
                                 Node->getOpcode() == ISD::SINT_TO_FP, dl);
    Results.push_back(Tmp1);
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    unsigned ExtOp, TruncOp;
    if (OVT.isVector()) {
      ExtOp   = ISD::BITCAST;
      TruncOp = ISD::BITCAST;
    } else {
      assert(OVT.isInteger() && "Cannot promote logic operation");
      ExtOp   = ISD::ANY_EXTEND;
      TruncOp = ISD::TRUNCATE;
    }
    // Promote each of the values to the new type.
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    // Perform the larger operation, then convert back
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
    Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1));
    break;
  }
  case ISD::SELECT: {
    unsigned ExtOp, TruncOp;
    if (Node->getValueType(0).isVector()) {
      ExtOp   = ISD::BITCAST;
      TruncOp = ISD::BITCAST;
    } else if (Node->getValueType(0).isInteger()) {
      ExtOp   = ISD::ANY_EXTEND;
      TruncOp = ISD::TRUNCATE;
    } else {
      ExtOp   = ISD::FP_EXTEND;
      TruncOp = ISD::FP_ROUND;
    }
    Tmp1 = Node->getOperand(0);
    // Promote each of the values to the new type.
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
    // Perform the larger operation, then round down.
    Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp1, Tmp2, Tmp3);
    if (TruncOp != ISD::FP_ROUND)
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1);
    else
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1,
                         DAG.getIntPtrConstant(0));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    SmallVector<int, 8> Mask;
    cast<ShuffleVectorSDNode>(Node)->getMask(Mask);

    // Cast the two input vectors.
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1));

    // Convert the shuffle mask to the right # elements.
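    // The shuffle is performed on the bitcast (promoted, narrower-element)
    // vectors with a correspondingly widened mask, and the result is bitcast
    // back to the original vector type.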
    Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SETCC: {
    unsigned ExtOp = ISD::FP_EXTEND;
    if (NVT.isInteger()) {
      ISD::CondCode CCCode =
        cast<CondCodeSDNode>(Node->getOperand(2))->get();
      ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    }
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
                                  Tmp1, Tmp2, Node->getOperand(2)));
    break;
  }
  }

  // Replace the original node with the legalized result.
  if (!Results.empty())
    ReplaceNode(Node, Results.data());
}

// SelectionDAG::Legalize - This is the entry point for the file.
//
void SelectionDAG::Legalize() {
  SelectionDAGLegalize(*this).LegalizeDAG();
}