LegalizeDAG.cpp revision ec52aaa12f57896fc806e849fa21a61603050ac4
//===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::Legalize method.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;

//===----------------------------------------------------------------------===//
/// SelectionDAGLegalize - This takes an arbitrary SelectionDAG as input and
/// hacks on it until the target machine can handle it.  This involves
/// eliminating value sizes the machine cannot handle (promoting small sizes to
/// large sizes or splitting up large values into small values) as well as
/// eliminating operations the machine cannot handle.
///
/// This code also does a small amount of optimization and recognition of idioms
/// as part of its processing.  For example, if a target does not support a
/// 'setcc' instruction efficiently, but does support the 'brcc' instruction,
/// this will attempt to merge setcc and brc instructions into brcc's.
///
namespace {
class SelectionDAGLegalize : public SelectionDAG::DAGUpdateListener {
  const TargetMachine &TM;
  const TargetLowering &TLI;
  SelectionDAG &DAG;

  /// LegalizePosition - The iterator for walking through the node list.
  SelectionDAG::allnodes_iterator LegalizePosition;

  /// LegalizedNodes - The set of nodes which have already been legalized.
  SmallPtrSet<SDNode *, 16> LegalizedNodes;

  // Libcall insertion helpers.

public:
  explicit SelectionDAGLegalize(SelectionDAG &DAG);

  void LegalizeDAG();

private:
  /// LegalizeOp - Legalizes the given operation.
  void LegalizeOp(SDNode *Node);

  SDValue OptimizeFloatStore(StoreSDNode *ST);

  /// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
  /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
  /// is necessary to spill the vector being inserted into to memory, perform
  /// the insert there, and then read the result back.
  SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val,
                                         SDValue Idx, DebugLoc dl);
  SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
                                  SDValue Idx, DebugLoc dl);

  /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
  /// performs the same shuffle in terms of order of result bytes, but on a
  /// type whose vector element type is narrower than the original shuffle
  /// type.
  /// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
  SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
                                     SDValue N1, SDValue N2,
                                     ArrayRef<int> Mask) const;

  void LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
                             DebugLoc dl);

  SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
  SDValue ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, const SDValue *Ops,
                        unsigned NumOps, bool isSigned, DebugLoc dl);

  std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
                                                 SDNode *Node, bool isSigned);
  SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
                          RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
                          RTLIB::Libcall Call_PPCF128);
  SDValue ExpandIntLibCall(SDNode *Node, bool isSigned,
                           RTLIB::Libcall Call_I8,
                           RTLIB::Libcall Call_I16,
                           RTLIB::Libcall Call_I32,
                           RTLIB::Libcall Call_I64,
                           RTLIB::Libcall Call_I128);
  void ExpandDivRemLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, DebugLoc dl);
  SDValue ExpandBUILD_VECTOR(SDNode *Node);
  SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
  void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
                                SmallVectorImpl<SDValue> &Results);
  SDValue ExpandFCOPYSIGN(SDNode *Node);
  SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT,
                               DebugLoc dl);
  SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);
  SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);

  SDValue ExpandBSWAP(SDValue Op, DebugLoc dl);
  SDValue ExpandBitCount(unsigned Opc, SDValue Op, DebugLoc dl);

  SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
  SDValue ExpandInsertToVectorThroughStack(SDValue Op);
  SDValue ExpandVectorBuildThroughStack(SDNode* Node);

  SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP);

  std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);

  void ExpandNode(SDNode *Node);
  void PromoteNode(SDNode *Node);

  void ForgetNode(SDNode *N) {
    LegalizedNodes.erase(N);
    if (LegalizePosition == SelectionDAG::allnodes_iterator(N))
      ++LegalizePosition;
  }

public:
  // DAGUpdateListener implementation.
  virtual void NodeDeleted(SDNode *N, SDNode *E) {
    ForgetNode(N);
  }
  virtual void NodeUpdated(SDNode *N) {}

  // Node replacement helpers
  void ReplacedNode(SDNode *N) {
    if (N->use_empty()) {
      DAG.RemoveDeadNode(N, this);
    } else {
      ForgetNode(N);
    }
  }
  void ReplaceNode(SDNode *Old, SDNode *New) {
    DAG.ReplaceAllUsesWith(Old, New, this);
    ReplacedNode(Old);
  }
  void ReplaceNode(SDValue Old, SDValue New) {
    DAG.ReplaceAllUsesWith(Old, New, this);
    ReplacedNode(Old.getNode());
  }
  void ReplaceNode(SDNode *Old, const SDValue *New) {
    DAG.ReplaceAllUsesWith(Old, New, this);
    ReplacedNode(Old);
  }
};
}

/// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
/// performs the same shuffle in terms of order of result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g.
<v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3> 177SDValue 178SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl, 179 SDValue N1, SDValue N2, 180 ArrayRef<int> Mask) const { 181 unsigned NumMaskElts = VT.getVectorNumElements(); 182 unsigned NumDestElts = NVT.getVectorNumElements(); 183 unsigned NumEltsGrowth = NumDestElts / NumMaskElts; 184 185 assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!"); 186 187 if (NumEltsGrowth == 1) 188 return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]); 189 190 SmallVector<int, 8> NewMask; 191 for (unsigned i = 0; i != NumMaskElts; ++i) { 192 int Idx = Mask[i]; 193 for (unsigned j = 0; j != NumEltsGrowth; ++j) { 194 if (Idx < 0) 195 NewMask.push_back(-1); 196 else 197 NewMask.push_back(Idx * NumEltsGrowth + j); 198 } 199 } 200 assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?"); 201 assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?"); 202 return DAG.getVectorShuffle(NVT, dl, N1, N2, &NewMask[0]); 203} 204 205SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag) 206 : TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()), 207 DAG(dag) { 208} 209 210void SelectionDAGLegalize::LegalizeDAG() { 211 DAG.AssignTopologicalOrder(); 212 213 // Visit all the nodes. We start in topological order, so that we see 214 // nodes with their original operands intact. Legalization can produce 215 // new nodes which may themselves need to be legalized. Iterate until all 216 // nodes have been legalized. 217 for (;;) { 218 bool AnyLegalized = false; 219 for (LegalizePosition = DAG.allnodes_end(); 220 LegalizePosition != DAG.allnodes_begin(); ) { 221 --LegalizePosition; 222 223 SDNode *N = LegalizePosition; 224 if (LegalizedNodes.insert(N)) { 225 AnyLegalized = true; 226 LegalizeOp(N); 227 } 228 } 229 if (!AnyLegalized) 230 break; 231 232 } 233 234 // Remove dead nodes now. 235 DAG.RemoveDeadNodes(); 236} 237 238/// ExpandConstantFP - Expands the ConstantFP node to an integer constant or 239/// a load from the constant pool. 240SDValue 241SelectionDAGLegalize::ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP) { 242 bool Extend = false; 243 DebugLoc dl = CFP->getDebugLoc(); 244 245 // If a FP immediate is precise when represented as a float and if the 246 // target can do an extending load from float to double, we put it into 247 // the constant pool as a float, even if it's is statically typed as a 248 // double. This shrinks FP constants and canonicalizes them for targets where 249 // an FP extending load is the same cost as a normal load (such as on the x87 250 // fp stack or PPC FP unit). 251 EVT VT = CFP->getValueType(0); 252 ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue()); 253 if (!UseCP) { 254 assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion"); 255 return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(), 256 (VT == MVT::f64) ? MVT::i64 : MVT::i32); 257 } 258 259 EVT OrigVT = VT; 260 EVT SVT = VT; 261 while (SVT != MVT::f32) { 262 SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1); 263 if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) && 264 // Only do this if the target has a native EXTLOAD instruction from 265 // smaller type. 
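    // For example, an f64 constant whose value is exactly representable as an
    // f32 can be emitted as an f32 constant-pool entry and reloaded with an
    // extending load, halving the size of the constant-pool entry.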
266 TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) && 267 TLI.ShouldShrinkFPConstant(OrigVT)) { 268 Type *SType = SVT.getTypeForEVT(*DAG.getContext()); 269 LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType)); 270 VT = SVT; 271 Extend = true; 272 } 273 } 274 275 SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy()); 276 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 277 if (Extend) { 278 SDValue Result = 279 DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT, 280 DAG.getEntryNode(), 281 CPIdx, MachinePointerInfo::getConstantPool(), 282 VT, false, false, Alignment); 283 return Result; 284 } 285 SDValue Result = 286 DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx, 287 MachinePointerInfo::getConstantPool(), false, false, false, 288 Alignment); 289 return Result; 290} 291 292/// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores. 293static void ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, 294 const TargetLowering &TLI, 295 SelectionDAGLegalize *DAGLegalize) { 296 assert(ST->getAddressingMode() == ISD::UNINDEXED && 297 "unaligned indexed stores not implemented!"); 298 SDValue Chain = ST->getChain(); 299 SDValue Ptr = ST->getBasePtr(); 300 SDValue Val = ST->getValue(); 301 EVT VT = Val.getValueType(); 302 int Alignment = ST->getAlignment(); 303 DebugLoc dl = ST->getDebugLoc(); 304 if (ST->getMemoryVT().isFloatingPoint() || 305 ST->getMemoryVT().isVector()) { 306 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 307 if (TLI.isTypeLegal(intVT)) { 308 // Expand to a bitconvert of the value to the integer type of the 309 // same size, then a (misaligned) int store. 310 // FIXME: Does not handle truncating floating point stores! 311 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val); 312 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(), 313 ST->isVolatile(), ST->isNonTemporal(), Alignment); 314 DAGLegalize->ReplaceNode(SDValue(ST, 0), Result); 315 return; 316 } 317 // Do a (aligned) store to a stack slot, then copy from the stack slot 318 // to the final destination using (unaligned) integer loads and stores. 319 EVT StoredVT = ST->getMemoryVT(); 320 EVT RegVT = 321 TLI.getRegisterType(*DAG.getContext(), 322 EVT::getIntegerVT(*DAG.getContext(), 323 StoredVT.getSizeInBits())); 324 unsigned StoredBytes = StoredVT.getSizeInBits() / 8; 325 unsigned RegBytes = RegVT.getSizeInBits() / 8; 326 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes; 327 328 // Make sure the stack slot is also aligned for the register type. 329 SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT); 330 331 // Perform the original store, only redirected to the stack slot. 332 SDValue Store = DAG.getTruncStore(Chain, dl, 333 Val, StackPtr, MachinePointerInfo(), 334 StoredVT, false, false, 0); 335 SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy()); 336 SmallVector<SDValue, 8> Stores; 337 unsigned Offset = 0; 338 339 // Do all but one copies using the full register width. 340 for (unsigned i = 1; i < NumRegs; i++) { 341 // Load one integer register's worth from the stack slot. 342 SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr, 343 MachinePointerInfo(), 344 false, false, false, 0); 345 // Store it to the final location. Remember the store. 346 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr, 347 ST->getPointerInfo().getWithOffset(Offset), 348 ST->isVolatile(), ST->isNonTemporal(), 349 MinAlign(ST->getAlignment(), Offset))); 350 // Increment the pointers. 
351 Offset += RegBytes; 352 StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr, 353 Increment); 354 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 355 } 356 357 // The last store may be partial. Do a truncating store. On big-endian 358 // machines this requires an extending load from the stack slot to ensure 359 // that the bits are in the right place. 360 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 361 8 * (StoredBytes - Offset)); 362 363 // Load from the stack slot. 364 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr, 365 MachinePointerInfo(), 366 MemVT, false, false, 0); 367 368 Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr, 369 ST->getPointerInfo() 370 .getWithOffset(Offset), 371 MemVT, ST->isVolatile(), 372 ST->isNonTemporal(), 373 MinAlign(ST->getAlignment(), Offset))); 374 // The order of the stores doesn't matter - say it with a TokenFactor. 375 SDValue Result = 376 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0], 377 Stores.size()); 378 DAGLegalize->ReplaceNode(SDValue(ST, 0), Result); 379 return; 380 } 381 assert(ST->getMemoryVT().isInteger() && 382 !ST->getMemoryVT().isVector() && 383 "Unaligned store of unknown type."); 384 // Get the half-size VT 385 EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext()); 386 int NumBits = NewStoredVT.getSizeInBits(); 387 int IncrementSize = NumBits / 8; 388 389 // Divide the stored value in two parts. 390 SDValue ShiftAmount = DAG.getConstant(NumBits, 391 TLI.getShiftAmountTy(Val.getValueType())); 392 SDValue Lo = Val; 393 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount); 394 395 // Store the two parts 396 SDValue Store1, Store2; 397 Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr, 398 ST->getPointerInfo(), NewStoredVT, 399 ST->isVolatile(), ST->isNonTemporal(), Alignment); 400 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 401 DAG.getConstant(IncrementSize, TLI.getPointerTy())); 402 Alignment = MinAlign(Alignment, IncrementSize); 403 Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr, 404 ST->getPointerInfo().getWithOffset(IncrementSize), 405 NewStoredVT, ST->isVolatile(), ST->isNonTemporal(), 406 Alignment); 407 408 SDValue Result = 409 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2); 410 DAGLegalize->ReplaceNode(SDValue(ST, 0), Result); 411} 412 413/// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads. 414static void 415ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, 416 const TargetLowering &TLI, 417 SDValue &ValResult, SDValue &ChainResult) { 418 assert(LD->getAddressingMode() == ISD::UNINDEXED && 419 "unaligned indexed loads not implemented!"); 420 SDValue Chain = LD->getChain(); 421 SDValue Ptr = LD->getBasePtr(); 422 EVT VT = LD->getValueType(0); 423 EVT LoadedVT = LD->getMemoryVT(); 424 DebugLoc dl = LD->getDebugLoc(); 425 if (VT.isFloatingPoint() || VT.isVector()) { 426 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits()); 427 if (TLI.isTypeLegal(intVT)) { 428 // Expand to a (misaligned) integer load of the same size, 429 // then bitconvert to floating point or vector. 
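      // e.g. an unaligned f64 load becomes an unaligned i64 load followed by
      // a bitcast of the loaded bits back to f64.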
430 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(), 431 LD->isVolatile(), 432 LD->isNonTemporal(), 433 LD->isInvariant(), LD->getAlignment()); 434 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad); 435 if (VT.isFloatingPoint() && LoadedVT != VT) 436 Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result); 437 438 ValResult = Result; 439 ChainResult = Chain; 440 return; 441 } 442 443 // Copy the value to a (aligned) stack slot using (unaligned) integer 444 // loads and stores, then do a (aligned) load from the stack slot. 445 EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT); 446 unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8; 447 unsigned RegBytes = RegVT.getSizeInBits() / 8; 448 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes; 449 450 // Make sure the stack slot is also aligned for the register type. 451 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT); 452 453 SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy()); 454 SmallVector<SDValue, 8> Stores; 455 SDValue StackPtr = StackBase; 456 unsigned Offset = 0; 457 458 // Do all but one copies using the full register width. 459 for (unsigned i = 1; i < NumRegs; i++) { 460 // Load one integer register's worth from the original location. 461 SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr, 462 LD->getPointerInfo().getWithOffset(Offset), 463 LD->isVolatile(), LD->isNonTemporal(), 464 LD->isInvariant(), 465 MinAlign(LD->getAlignment(), Offset)); 466 // Follow the load with a store to the stack slot. Remember the store. 467 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr, 468 MachinePointerInfo(), false, false, 0)); 469 // Increment the pointers. 470 Offset += RegBytes; 471 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 472 StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr, 473 Increment); 474 } 475 476 // The last copy may be partial. Do an extending load. 477 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 478 8 * (LoadedBytes - Offset)); 479 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr, 480 LD->getPointerInfo().getWithOffset(Offset), 481 MemVT, LD->isVolatile(), 482 LD->isNonTemporal(), 483 MinAlign(LD->getAlignment(), Offset)); 484 // Follow the load with a store to the stack slot. Remember the store. 485 // On big-endian machines this requires a truncating store to ensure 486 // that the bits end up in the right place. 487 Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr, 488 MachinePointerInfo(), MemVT, 489 false, false, 0)); 490 491 // The order of the stores doesn't matter - say it with a TokenFactor. 492 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0], 493 Stores.size()); 494 495 // Finally, perform the original load only redirected to the stack slot. 496 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase, 497 MachinePointerInfo(), LoadedVT, false, false, 0); 498 499 // Callers expect a MERGE_VALUES node. 500 ValResult = Load; 501 ChainResult = TF; 502 return; 503 } 504 assert(LoadedVT.isInteger() && !LoadedVT.isVector() && 505 "Unaligned load of unsupported type."); 506 507 // Compute the new VT that is half the size of the old one. This is an 508 // integer MVT. 
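  // e.g. an unaligned i32 load is rebuilt from two half-size loads: the low
  // half is zero-extended and the high half is shifted into position and
  // OR'd in below.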
509 unsigned NumBits = LoadedVT.getSizeInBits(); 510 EVT NewLoadedVT; 511 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2); 512 NumBits >>= 1; 513 514 unsigned Alignment = LD->getAlignment(); 515 unsigned IncrementSize = NumBits / 8; 516 ISD::LoadExtType HiExtType = LD->getExtensionType(); 517 518 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD. 519 if (HiExtType == ISD::NON_EXTLOAD) 520 HiExtType = ISD::ZEXTLOAD; 521 522 // Load the value in two parts 523 SDValue Lo, Hi; 524 if (TLI.isLittleEndian()) { 525 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(), 526 NewLoadedVT, LD->isVolatile(), 527 LD->isNonTemporal(), Alignment); 528 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 529 DAG.getConstant(IncrementSize, TLI.getPointerTy())); 530 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, 531 LD->getPointerInfo().getWithOffset(IncrementSize), 532 NewLoadedVT, LD->isVolatile(), 533 LD->isNonTemporal(), MinAlign(Alignment,IncrementSize)); 534 } else { 535 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(), 536 NewLoadedVT, LD->isVolatile(), 537 LD->isNonTemporal(), Alignment); 538 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 539 DAG.getConstant(IncrementSize, TLI.getPointerTy())); 540 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, 541 LD->getPointerInfo().getWithOffset(IncrementSize), 542 NewLoadedVT, LD->isVolatile(), 543 LD->isNonTemporal(), MinAlign(Alignment,IncrementSize)); 544 } 545 546 // aggregate the two parts 547 SDValue ShiftAmount = DAG.getConstant(NumBits, 548 TLI.getShiftAmountTy(Hi.getValueType())); 549 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount); 550 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo); 551 552 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 553 Hi.getValue(1)); 554 555 ValResult = Result; 556 ChainResult = TF; 557} 558 559/// PerformInsertVectorEltInMemory - Some target cannot handle a variable 560/// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it 561/// is necessary to spill the vector being inserted into to memory, perform 562/// the insert there, and then read the result back. 563SDValue SelectionDAGLegalize:: 564PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx, 565 DebugLoc dl) { 566 SDValue Tmp1 = Vec; 567 SDValue Tmp2 = Val; 568 SDValue Tmp3 = Idx; 569 570 // If the target doesn't support this, we have to spill the input vector 571 // to a temporary stack slot, update the element, then reload it. This is 572 // badness. We could also load the value into a vector register (either 573 // with a "move to register" or "extload into register" instruction, then 574 // permute it into place, if the idx is a constant and if the idx is 575 // supported by the target. 576 EVT VT = Tmp1.getValueType(); 577 EVT EltVT = VT.getVectorElementType(); 578 EVT IdxVT = Tmp3.getValueType(); 579 EVT PtrVT = TLI.getPointerTy(); 580 SDValue StackPtr = DAG.CreateStackTemporary(VT); 581 582 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 583 584 // Store the vector. 585 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr, 586 MachinePointerInfo::getFixedStack(SPFI), 587 false, false, 0); 588 589 // Truncate or zero extend offset to target pointer type. 590 unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND; 591 Tmp3 = DAG.getNode(CastOpc, dl, PtrVT, Tmp3); 592 // Add the offset to the index. 
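  // The element's byte offset into the stack slot is Idx * (element size in
  // bytes).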
593 unsigned EltSize = EltVT.getSizeInBits()/8; 594 Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT)); 595 SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr); 596 // Store the scalar value. 597 Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, MachinePointerInfo(), EltVT, 598 false, false, 0); 599 // Load the updated vector. 600 return DAG.getLoad(VT, dl, Ch, StackPtr, 601 MachinePointerInfo::getFixedStack(SPFI), false, false, 602 false, 0); 603} 604 605 606SDValue SelectionDAGLegalize:: 607ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, DebugLoc dl) { 608 if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) { 609 // SCALAR_TO_VECTOR requires that the type of the value being inserted 610 // match the element type of the vector being created, except for 611 // integers in which case the inserted value can be over width. 612 EVT EltVT = Vec.getValueType().getVectorElementType(); 613 if (Val.getValueType() == EltVT || 614 (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) { 615 SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 616 Vec.getValueType(), Val); 617 618 unsigned NumElts = Vec.getValueType().getVectorNumElements(); 619 // We generate a shuffle of InVec and ScVec, so the shuffle mask 620 // should be 0,1,2,3,4,5... with the appropriate element replaced with 621 // elt 0 of the RHS. 622 SmallVector<int, 8> ShufOps; 623 for (unsigned i = 0; i != NumElts; ++i) 624 ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts); 625 626 return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec, 627 &ShufOps[0]); 628 } 629 } 630 return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl); 631} 632 633SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) { 634 // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr' 635 // FIXME: We shouldn't do this for TargetConstantFP's. 636 // FIXME: move this to the DAG Combiner! Note that we can't regress due 637 // to phase ordering between legalized code and the dag combiner. This 638 // probably means that we need to integrate dag combiner and legalizer 639 // together. 640 // We generally can't do this one for long doubles. 641 SDValue Tmp1 = ST->getChain(); 642 SDValue Tmp2 = ST->getBasePtr(); 643 SDValue Tmp3; 644 unsigned Alignment = ST->getAlignment(); 645 bool isVolatile = ST->isVolatile(); 646 bool isNonTemporal = ST->isNonTemporal(); 647 DebugLoc dl = ST->getDebugLoc(); 648 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) { 649 if (CFP->getValueType(0) == MVT::f32 && 650 TLI.isTypeLegal(MVT::i32)) { 651 Tmp3 = DAG.getConstant(CFP->getValueAPF(). 652 bitcastToAPInt().zextOrTrunc(32), 653 MVT::i32); 654 return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 655 isVolatile, isNonTemporal, Alignment); 656 } 657 658 if (CFP->getValueType(0) == MVT::f64) { 659 // If this target supports 64-bit registers, do a single 64-bit store. 660 if (TLI.isTypeLegal(MVT::i64)) { 661 Tmp3 = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt(). 662 zextOrTrunc(64), MVT::i64); 663 return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 664 isVolatile, isNonTemporal, Alignment); 665 } 666 667 if (TLI.isTypeLegal(MVT::i32) && !ST->isVolatile()) { 668 // Otherwise, if the target supports 32-bit registers, use 2 32-bit 669 // stores. If the target supports neither 32- nor 64-bits, this 670 // xform is certainly not worth it. 
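        // Split the f64 bit pattern into its low and high 32-bit halves and
        // store each half separately, swapping the halves on big-endian
        // targets.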
671 const APInt &IntVal =CFP->getValueAPF().bitcastToAPInt(); 672 SDValue Lo = DAG.getConstant(IntVal.trunc(32), MVT::i32); 673 SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32); 674 if (TLI.isBigEndian()) std::swap(Lo, Hi); 675 676 Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getPointerInfo(), isVolatile, 677 isNonTemporal, Alignment); 678 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 679 DAG.getIntPtrConstant(4)); 680 Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2, 681 ST->getPointerInfo().getWithOffset(4), 682 isVolatile, isNonTemporal, MinAlign(Alignment, 4U)); 683 684 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi); 685 } 686 } 687 } 688 return SDValue(0, 0); 689} 690 691/// LegalizeOp - Return a legal replacement for the given operation, with 692/// all legal operands. 693void SelectionDAGLegalize::LegalizeOp(SDNode *Node) { 694 if (Node->getOpcode() == ISD::TargetConstant) // Allow illegal target nodes. 695 return; 696 697 DebugLoc dl = Node->getDebugLoc(); 698 699 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) 700 assert(TLI.getTypeAction(*DAG.getContext(), Node->getValueType(i)) == 701 TargetLowering::TypeLegal && 702 "Unexpected illegal type!"); 703 704 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) 705 assert((TLI.getTypeAction(*DAG.getContext(), 706 Node->getOperand(i).getValueType()) == 707 TargetLowering::TypeLegal || 708 Node->getOperand(i).getOpcode() == ISD::TargetConstant) && 709 "Unexpected illegal type!"); 710 711 SDValue Tmp1, Tmp2, Tmp3, Tmp4; 712 bool isCustom = false; 713 714 // Figure out the correct action; the way to query this varies by opcode 715 TargetLowering::LegalizeAction Action = TargetLowering::Legal; 716 bool SimpleFinishLegalizing = true; 717 switch (Node->getOpcode()) { 718 case ISD::INTRINSIC_W_CHAIN: 719 case ISD::INTRINSIC_WO_CHAIN: 720 case ISD::INTRINSIC_VOID: 721 case ISD::VAARG: 722 case ISD::STACKSAVE: 723 Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other); 724 break; 725 case ISD::SINT_TO_FP: 726 case ISD::UINT_TO_FP: 727 case ISD::EXTRACT_VECTOR_ELT: 728 Action = TLI.getOperationAction(Node->getOpcode(), 729 Node->getOperand(0).getValueType()); 730 break; 731 case ISD::FP_ROUND_INREG: 732 case ISD::SIGN_EXTEND_INREG: { 733 EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT(); 734 Action = TLI.getOperationAction(Node->getOpcode(), InnerType); 735 break; 736 } 737 case ISD::ATOMIC_STORE: { 738 Action = TLI.getOperationAction(Node->getOpcode(), 739 Node->getOperand(2).getValueType()); 740 break; 741 } 742 case ISD::SELECT_CC: 743 case ISD::SETCC: 744 case ISD::BR_CC: { 745 unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 : 746 Node->getOpcode() == ISD::SETCC ? 2 : 1; 747 unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 : 0; 748 EVT OpVT = Node->getOperand(CompareOperand).getValueType(); 749 ISD::CondCode CCCode = 750 cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get(); 751 Action = TLI.getCondCodeAction(CCCode, OpVT); 752 if (Action == TargetLowering::Legal) { 753 if (Node->getOpcode() == ISD::SELECT_CC) 754 Action = TLI.getOperationAction(Node->getOpcode(), 755 Node->getValueType(0)); 756 else 757 Action = TLI.getOperationAction(Node->getOpcode(), OpVT); 758 } 759 break; 760 } 761 case ISD::LOAD: 762 case ISD::STORE: 763 // FIXME: Model these properly. LOAD and STORE are complicated, and 764 // STORE expects the unlegalized operand in some cases. 
765 SimpleFinishLegalizing = false; 766 break; 767 case ISD::CALLSEQ_START: 768 case ISD::CALLSEQ_END: 769 // FIXME: This shouldn't be necessary. These nodes have special properties 770 // dealing with the recursive nature of legalization. Removing this 771 // special case should be done as part of making LegalizeDAG non-recursive. 772 SimpleFinishLegalizing = false; 773 break; 774 case ISD::EXTRACT_ELEMENT: 775 case ISD::FLT_ROUNDS_: 776 case ISD::SADDO: 777 case ISD::SSUBO: 778 case ISD::UADDO: 779 case ISD::USUBO: 780 case ISD::SMULO: 781 case ISD::UMULO: 782 case ISD::FPOWI: 783 case ISD::MERGE_VALUES: 784 case ISD::EH_RETURN: 785 case ISD::FRAME_TO_ARGS_OFFSET: 786 case ISD::EH_SJLJ_SETJMP: 787 case ISD::EH_SJLJ_LONGJMP: 788 // These operations lie about being legal: when they claim to be legal, 789 // they should actually be expanded. 790 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); 791 if (Action == TargetLowering::Legal) 792 Action = TargetLowering::Expand; 793 break; 794 case ISD::INIT_TRAMPOLINE: 795 case ISD::ADJUST_TRAMPOLINE: 796 case ISD::FRAMEADDR: 797 case ISD::RETURNADDR: 798 // These operations lie about being legal: when they claim to be legal, 799 // they should actually be custom-lowered. 800 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); 801 if (Action == TargetLowering::Legal) 802 Action = TargetLowering::Custom; 803 break; 804 default: 805 if (Node->getOpcode() >= ISD::BUILTIN_OP_END) { 806 Action = TargetLowering::Legal; 807 } else { 808 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); 809 } 810 break; 811 } 812 813 if (SimpleFinishLegalizing) { 814 SmallVector<SDValue, 8> Ops; 815 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) 816 Ops.push_back(Node->getOperand(i)); 817 switch (Node->getOpcode()) { 818 default: break; 819 case ISD::SHL: 820 case ISD::SRL: 821 case ISD::SRA: 822 case ISD::ROTL: 823 case ISD::ROTR: 824 // Legalizing shifts/rotates requires adjusting the shift amount 825 // to the appropriate width. 826 if (!Ops[1].getValueType().isVector()) { 827 SDValue SAO = DAG.getShiftAmountOperand(Ops[0].getValueType(), Ops[1]); 828 HandleSDNode Handle(SAO); 829 LegalizeOp(SAO.getNode()); 830 Ops[1] = Handle.getValue(); 831 } 832 break; 833 case ISD::SRL_PARTS: 834 case ISD::SRA_PARTS: 835 case ISD::SHL_PARTS: 836 // Legalizing shifts/rotates requires adjusting the shift amount 837 // to the appropriate width. 838 if (!Ops[2].getValueType().isVector()) { 839 SDValue SAO = DAG.getShiftAmountOperand(Ops[0].getValueType(), Ops[2]); 840 HandleSDNode Handle(SAO); 841 LegalizeOp(SAO.getNode()); 842 Ops[2] = Handle.getValue(); 843 } 844 break; 845 } 846 847 SDNode *NewNode = DAG.UpdateNodeOperands(Node, Ops.data(), Ops.size()); 848 if (NewNode != Node) { 849 DAG.ReplaceAllUsesWith(Node, NewNode, this); 850 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) 851 DAG.TransferDbgValues(SDValue(Node, i), SDValue(NewNode, i)); 852 ReplacedNode(Node); 853 Node = NewNode; 854 } 855 switch (Action) { 856 case TargetLowering::Legal: 857 return; 858 case TargetLowering::Custom: 859 // FIXME: The handling for custom lowering with multiple results is 860 // a complete mess. 
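      // Give the target a chance to lower the node itself; a null result
      // means the target declined, and we fall through to the Expand case
      // below.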
861 Tmp1 = TLI.LowerOperation(SDValue(Node, 0), DAG); 862 if (Tmp1.getNode()) { 863 SmallVector<SDValue, 8> ResultVals; 864 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) { 865 if (e == 1) 866 ResultVals.push_back(Tmp1); 867 else 868 ResultVals.push_back(Tmp1.getValue(i)); 869 } 870 if (Tmp1.getNode() != Node || Tmp1.getResNo() != 0) { 871 DAG.ReplaceAllUsesWith(Node, ResultVals.data(), this); 872 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) 873 DAG.TransferDbgValues(SDValue(Node, i), ResultVals[i]); 874 ReplacedNode(Node); 875 } 876 return; 877 } 878 879 // FALL THROUGH 880 case TargetLowering::Expand: 881 ExpandNode(Node); 882 return; 883 case TargetLowering::Promote: 884 PromoteNode(Node); 885 return; 886 } 887 } 888 889 switch (Node->getOpcode()) { 890 default: 891#ifndef NDEBUG 892 dbgs() << "NODE: "; 893 Node->dump( &DAG); 894 dbgs() << "\n"; 895#endif 896 llvm_unreachable("Do not know how to legalize this operator!"); 897 898 case ISD::CALLSEQ_START: 899 case ISD::CALLSEQ_END: 900 break; 901 case ISD::LOAD: { 902 LoadSDNode *LD = cast<LoadSDNode>(Node); 903 Tmp1 = LD->getChain(); // Legalize the chain. 904 Tmp2 = LD->getBasePtr(); // Legalize the base pointer. 905 906 ISD::LoadExtType ExtType = LD->getExtensionType(); 907 if (ExtType == ISD::NON_EXTLOAD) { 908 EVT VT = Node->getValueType(0); 909 Tmp3 = SDValue(Node, 0); 910 Tmp4 = SDValue(Node, 1); 911 912 switch (TLI.getOperationAction(Node->getOpcode(), VT)) { 913 default: llvm_unreachable("This action is not supported yet!"); 914 case TargetLowering::Legal: 915 // If this is an unaligned load and the target doesn't support it, 916 // expand it. 917 if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { 918 Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 919 unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty); 920 if (LD->getAlignment() < ABIAlignment){ 921 ExpandUnalignedLoad(cast<LoadSDNode>(Node), 922 DAG, TLI, Tmp3, Tmp4); 923 } 924 } 925 break; 926 case TargetLowering::Custom: 927 Tmp1 = TLI.LowerOperation(Tmp3, DAG); 928 if (Tmp1.getNode()) { 929 Tmp3 = Tmp1; 930 Tmp4 = Tmp1.getValue(1); 931 } 932 break; 933 case TargetLowering::Promote: { 934 // Only promote a load of vector type to another. 935 assert(VT.isVector() && "Cannot promote this load!"); 936 // Change base type to a different vector type. 937 EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT); 938 939 Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getPointerInfo(), 940 LD->isVolatile(), LD->isNonTemporal(), 941 LD->isInvariant(), LD->getAlignment()); 942 Tmp3 = DAG.getNode(ISD::BITCAST, dl, VT, Tmp1); 943 Tmp4 = Tmp1.getValue(1); 944 break; 945 } 946 } 947 if (Tmp4.getNode() != Node) { 948 assert(Tmp3.getNode() != Node && "Load must be completely replaced"); 949 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp3); 950 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Tmp4); 951 ReplacedNode(Node); 952 } 953 return; 954 } 955 956 EVT SrcVT = LD->getMemoryVT(); 957 unsigned SrcWidth = SrcVT.getSizeInBits(); 958 unsigned Alignment = LD->getAlignment(); 959 bool isVolatile = LD->isVolatile(); 960 bool isNonTemporal = LD->isNonTemporal(); 961 962 if (SrcWidth != SrcVT.getStoreSizeInBits() && 963 // Some targets pretend to have an i1 loading operation, and actually 964 // load an i8. This trick is correct for ZEXTLOAD because the top 7 965 // bits are guaranteed to be zero; it helps the optimizers understand 966 // that these bits are zero. 
It is also useful for EXTLOAD, since it 967 // tells the optimizers that those bits are undefined. It would be 968 // nice to have an effective generic way of getting these benefits... 969 // Until such a way is found, don't insist on promoting i1 here. 970 (SrcVT != MVT::i1 || 971 TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) { 972 // Promote to a byte-sized load if not loading an integral number of 973 // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24. 974 unsigned NewWidth = SrcVT.getStoreSizeInBits(); 975 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth); 976 SDValue Ch; 977 978 // The extra bits are guaranteed to be zero, since we stored them that 979 // way. A zext load from NVT thus automatically gives zext from SrcVT. 980 981 ISD::LoadExtType NewExtType = 982 ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD; 983 984 SDValue Result = 985 DAG.getExtLoad(NewExtType, dl, Node->getValueType(0), 986 Tmp1, Tmp2, LD->getPointerInfo(), 987 NVT, isVolatile, isNonTemporal, Alignment); 988 989 Ch = Result.getValue(1); // The chain. 990 991 if (ExtType == ISD::SEXTLOAD) 992 // Having the top bits zero doesn't help when sign extending. 993 Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, 994 Result.getValueType(), 995 Result, DAG.getValueType(SrcVT)); 996 else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType()) 997 // All the top bits are guaranteed to be zero - inform the optimizers. 998 Result = DAG.getNode(ISD::AssertZext, dl, 999 Result.getValueType(), Result, 1000 DAG.getValueType(SrcVT)); 1001 1002 Tmp1 = Result; 1003 Tmp2 = Ch; 1004 } else if (SrcWidth & (SrcWidth - 1)) { 1005 // If not loading a power-of-2 number of bits, expand as two loads. 1006 assert(!SrcVT.isVector() && "Unsupported extload!"); 1007 unsigned RoundWidth = 1 << Log2_32(SrcWidth); 1008 assert(RoundWidth < SrcWidth); 1009 unsigned ExtraWidth = SrcWidth - RoundWidth; 1010 assert(ExtraWidth < RoundWidth); 1011 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && 1012 "Load size not an integral number of bytes!"); 1013 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth); 1014 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth); 1015 SDValue Lo, Hi, Ch; 1016 unsigned IncrementSize; 1017 1018 if (TLI.isLittleEndian()) { 1019 // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16) 1020 // Load the bottom RoundWidth bits. 1021 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0), 1022 Tmp1, Tmp2, 1023 LD->getPointerInfo(), RoundVT, isVolatile, 1024 isNonTemporal, Alignment); 1025 1026 // Load the remaining ExtraWidth bits. 1027 IncrementSize = RoundWidth / 8; 1028 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1029 DAG.getIntPtrConstant(IncrementSize)); 1030 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2, 1031 LD->getPointerInfo().getWithOffset(IncrementSize), 1032 ExtraVT, isVolatile, isNonTemporal, 1033 MinAlign(Alignment, IncrementSize)); 1034 1035 // Build a factor node to remember that this load is independent of 1036 // the other one. 1037 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 1038 Hi.getValue(1)); 1039 1040 // Move the top bits to the right place. 1041 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, 1042 DAG.getConstant(RoundWidth, 1043 TLI.getShiftAmountTy(Hi.getValueType()))); 1044 1045 // Join the hi and lo parts. 1046 Tmp1 = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); 1047 } else { 1048 // Big endian - avoid unaligned loads. 
1049 // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8 1050 // Load the top RoundWidth bits. 1051 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2, 1052 LD->getPointerInfo(), RoundVT, isVolatile, 1053 isNonTemporal, Alignment); 1054 1055 // Load the remaining ExtraWidth bits. 1056 IncrementSize = RoundWidth / 8; 1057 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1058 DAG.getIntPtrConstant(IncrementSize)); 1059 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, 1060 dl, Node->getValueType(0), Tmp1, Tmp2, 1061 LD->getPointerInfo().getWithOffset(IncrementSize), 1062 ExtraVT, isVolatile, isNonTemporal, 1063 MinAlign(Alignment, IncrementSize)); 1064 1065 // Build a factor node to remember that this load is independent of 1066 // the other one. 1067 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 1068 Hi.getValue(1)); 1069 1070 // Move the top bits to the right place. 1071 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, 1072 DAG.getConstant(ExtraWidth, 1073 TLI.getShiftAmountTy(Hi.getValueType()))); 1074 1075 // Join the hi and lo parts. 1076 Tmp1 = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); 1077 } 1078 1079 Tmp2 = Ch; 1080 } else { 1081 switch (TLI.getLoadExtAction(ExtType, SrcVT)) { 1082 default: llvm_unreachable("This action is not supported yet!"); 1083 case TargetLowering::Custom: 1084 isCustom = true; 1085 // FALLTHROUGH 1086 case TargetLowering::Legal: 1087 Tmp1 = SDValue(Node, 0); 1088 Tmp2 = SDValue(Node, 1); 1089 1090 if (isCustom) { 1091 Tmp3 = TLI.LowerOperation(SDValue(Node, 0), DAG); 1092 if (Tmp3.getNode()) { 1093 Tmp1 = Tmp3; 1094 Tmp2 = Tmp3.getValue(1); 1095 } 1096 } else { 1097 // If this is an unaligned load and the target doesn't support it, 1098 // expand it. 1099 if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { 1100 Type *Ty = 1101 LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1102 unsigned ABIAlignment = 1103 TLI.getTargetData()->getABITypeAlignment(Ty); 1104 if (LD->getAlignment() < ABIAlignment){ 1105 ExpandUnalignedLoad(cast<LoadSDNode>(Node), 1106 DAG, TLI, Tmp1, Tmp2); 1107 } 1108 } 1109 } 1110 break; 1111 case TargetLowering::Expand: 1112 if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && TLI.isTypeLegal(SrcVT)) { 1113 SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2, 1114 LD->getPointerInfo(), 1115 LD->isVolatile(), LD->isNonTemporal(), 1116 LD->isInvariant(), LD->getAlignment()); 1117 unsigned ExtendOp; 1118 switch (ExtType) { 1119 case ISD::EXTLOAD: 1120 ExtendOp = (SrcVT.isFloatingPoint() ? 1121 ISD::FP_EXTEND : ISD::ANY_EXTEND); 1122 break; 1123 case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break; 1124 case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break; 1125 default: llvm_unreachable("Unexpected extend load type!"); 1126 } 1127 Tmp1 = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load); 1128 Tmp2 = Load.getValue(1); 1129 break; 1130 } 1131 1132 assert(!SrcVT.isVector() && 1133 "Vector Loads are handled in LegalizeVectorOps"); 1134 1135 // FIXME: This does not work for vectors on most targets. Sign- and 1136 // zero-extend operations are currently folded into extending loads, 1137 // whether they are legal or not, and then we end up here without any 1138 // support for legalizing them. 1139 assert(ExtType != ISD::EXTLOAD && 1140 "EXTLOAD should always be supported!"); 1141 // Turn the unsupported load into an EXTLOAD followed by an explicit 1142 // zero/sign extend inreg. 
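          // e.g. an illegal SEXTLOAD:i16 becomes an EXTLOAD:i16 followed by a
          // SIGN_EXTEND_INREG of the loaded value.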
1143 SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0), 1144 Tmp1, Tmp2, LD->getPointerInfo(), SrcVT, 1145 LD->isVolatile(), LD->isNonTemporal(), 1146 LD->getAlignment()); 1147 SDValue ValRes; 1148 if (ExtType == ISD::SEXTLOAD) 1149 ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, 1150 Result.getValueType(), 1151 Result, DAG.getValueType(SrcVT)); 1152 else 1153 ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType()); 1154 Tmp1 = ValRes; 1155 Tmp2 = Result.getValue(1); 1156 break; 1157 } 1158 } 1159 1160 // Since loads produce two values, make sure to remember that we legalized 1161 // both of them. 1162 if (Tmp2.getNode() != Node) { 1163 assert(Tmp1.getNode() != Node && "Load must be completely replaced"); 1164 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp1); 1165 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Tmp2); 1166 ReplacedNode(Node); 1167 } 1168 break; 1169 } 1170 case ISD::STORE: { 1171 StoreSDNode *ST = cast<StoreSDNode>(Node); 1172 Tmp1 = ST->getChain(); 1173 Tmp2 = ST->getBasePtr(); 1174 unsigned Alignment = ST->getAlignment(); 1175 bool isVolatile = ST->isVolatile(); 1176 bool isNonTemporal = ST->isNonTemporal(); 1177 1178 if (!ST->isTruncatingStore()) { 1179 if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) { 1180 ReplaceNode(ST, OptStore); 1181 break; 1182 } 1183 1184 { 1185 Tmp3 = ST->getValue(); 1186 EVT VT = Tmp3.getValueType(); 1187 switch (TLI.getOperationAction(ISD::STORE, VT)) { 1188 default: llvm_unreachable("This action is not supported yet!"); 1189 case TargetLowering::Legal: 1190 // If this is an unaligned store and the target doesn't support it, 1191 // expand it. 1192 if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { 1193 Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1194 unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty); 1195 if (ST->getAlignment() < ABIAlignment) 1196 ExpandUnalignedStore(cast<StoreSDNode>(Node), 1197 DAG, TLI, this); 1198 } 1199 break; 1200 case TargetLowering::Custom: 1201 Tmp1 = TLI.LowerOperation(SDValue(Node, 0), DAG); 1202 if (Tmp1.getNode()) 1203 ReplaceNode(SDValue(Node, 0), Tmp1); 1204 break; 1205 case TargetLowering::Promote: { 1206 assert(VT.isVector() && "Unknown legal promote case!"); 1207 Tmp3 = DAG.getNode(ISD::BITCAST, dl, 1208 TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3); 1209 SDValue Result = 1210 DAG.getStore(Tmp1, dl, Tmp3, Tmp2, 1211 ST->getPointerInfo(), isVolatile, 1212 isNonTemporal, Alignment); 1213 ReplaceNode(SDValue(Node, 0), Result); 1214 break; 1215 } 1216 } 1217 break; 1218 } 1219 } else { 1220 Tmp3 = ST->getValue(); 1221 1222 EVT StVT = ST->getMemoryVT(); 1223 unsigned StWidth = StVT.getSizeInBits(); 1224 1225 if (StWidth != StVT.getStoreSizeInBits()) { 1226 // Promote to a byte-sized store with upper bits zero if not 1227 // storing an integral number of bytes. For example, promote 1228 // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1) 1229 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), 1230 StVT.getStoreSizeInBits()); 1231 Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT); 1232 SDValue Result = 1233 DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 1234 NVT, isVolatile, isNonTemporal, Alignment); 1235 ReplaceNode(SDValue(Node, 0), Result); 1236 } else if (StWidth & (StWidth - 1)) { 1237 // If not storing a power-of-2 number of bits, expand as two stores. 
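        // e.g. a TRUNCSTORE:i24 is split into an i16 store of one part and an
        // i8 store of the remaining bits, with the order depending on the
        // target's endianness.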
1238 assert(!StVT.isVector() && "Unsupported truncstore!"); 1239 unsigned RoundWidth = 1 << Log2_32(StWidth); 1240 assert(RoundWidth < StWidth); 1241 unsigned ExtraWidth = StWidth - RoundWidth; 1242 assert(ExtraWidth < RoundWidth); 1243 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && 1244 "Store size not an integral number of bytes!"); 1245 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth); 1246 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth); 1247 SDValue Lo, Hi; 1248 unsigned IncrementSize; 1249 1250 if (TLI.isLittleEndian()) { 1251 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16) 1252 // Store the bottom RoundWidth bits. 1253 Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 1254 RoundVT, 1255 isVolatile, isNonTemporal, Alignment); 1256 1257 // Store the remaining ExtraWidth bits. 1258 IncrementSize = RoundWidth / 8; 1259 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1260 DAG.getIntPtrConstant(IncrementSize)); 1261 Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3, 1262 DAG.getConstant(RoundWidth, 1263 TLI.getShiftAmountTy(Tmp3.getValueType()))); 1264 Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, 1265 ST->getPointerInfo().getWithOffset(IncrementSize), 1266 ExtraVT, isVolatile, isNonTemporal, 1267 MinAlign(Alignment, IncrementSize)); 1268 } else { 1269 // Big endian - avoid unaligned stores. 1270 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X 1271 // Store the top RoundWidth bits. 1272 Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3, 1273 DAG.getConstant(ExtraWidth, 1274 TLI.getShiftAmountTy(Tmp3.getValueType()))); 1275 Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getPointerInfo(), 1276 RoundVT, isVolatile, isNonTemporal, Alignment); 1277 1278 // Store the remaining ExtraWidth bits. 1279 IncrementSize = RoundWidth / 8; 1280 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1281 DAG.getIntPtrConstant(IncrementSize)); 1282 Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, 1283 ST->getPointerInfo().getWithOffset(IncrementSize), 1284 ExtraVT, isVolatile, isNonTemporal, 1285 MinAlign(Alignment, IncrementSize)); 1286 } 1287 1288 // The order of the stores doesn't matter. 1289 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi); 1290 ReplaceNode(SDValue(Node, 0), Result); 1291 } else { 1292 switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) { 1293 default: llvm_unreachable("This action is not supported yet!"); 1294 case TargetLowering::Legal: 1295 // If this is an unaligned store and the target doesn't support it, 1296 // expand it. 
1297 if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { 1298 Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1299 unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty); 1300 if (ST->getAlignment() < ABIAlignment) 1301 ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this); 1302 } 1303 break; 1304 case TargetLowering::Custom: 1305 ReplaceNode(SDValue(Node, 0), 1306 TLI.LowerOperation(SDValue(Node, 0), DAG)); 1307 break; 1308 case TargetLowering::Expand: 1309 assert(!StVT.isVector() && 1310 "Vector Stores are handled in LegalizeVectorOps"); 1311 1312 // TRUNCSTORE:i16 i32 -> STORE i16 1313 assert(TLI.isTypeLegal(StVT) && "Do not know how to expand this store!"); 1314 Tmp3 = DAG.getNode(ISD::TRUNCATE, dl, StVT, Tmp3); 1315 SDValue Result = 1316 DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 1317 isVolatile, isNonTemporal, Alignment); 1318 ReplaceNode(SDValue(Node, 0), Result); 1319 break; 1320 } 1321 } 1322 } 1323 break; 1324 } 1325 } 1326} 1327 1328SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) { 1329 SDValue Vec = Op.getOperand(0); 1330 SDValue Idx = Op.getOperand(1); 1331 DebugLoc dl = Op.getDebugLoc(); 1332 // Store the value to a temporary stack slot, then LOAD the returned part. 1333 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); 1334 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, 1335 MachinePointerInfo(), false, false, 0); 1336 1337 // Add the offset to the index. 1338 unsigned EltSize = 1339 Vec.getValueType().getVectorElementType().getSizeInBits()/8; 1340 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, 1341 DAG.getConstant(EltSize, Idx.getValueType())); 1342 1343 if (Idx.getValueType().bitsGT(TLI.getPointerTy())) 1344 Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx); 1345 else 1346 Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx); 1347 1348 StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr); 1349 1350 if (Op.getValueType().isVector()) 1351 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr,MachinePointerInfo(), 1352 false, false, false, 0); 1353 return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr, 1354 MachinePointerInfo(), 1355 Vec.getValueType().getVectorElementType(), 1356 false, false, 0); 1357} 1358 1359SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) { 1360 assert(Op.getValueType().isVector() && "Non-vector insert subvector!"); 1361 1362 SDValue Vec = Op.getOperand(0); 1363 SDValue Part = Op.getOperand(1); 1364 SDValue Idx = Op.getOperand(2); 1365 DebugLoc dl = Op.getDebugLoc(); 1366 1367 // Store the value to a temporary stack slot, then LOAD the returned part. 1368 1369 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); 1370 int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 1371 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); 1372 1373 // First store the whole vector. 1374 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo, 1375 false, false, 0); 1376 1377 // Then store the inserted part. 1378 1379 // Add the offset to the index. 
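  // The inserted part's byte offset into the stack slot is Idx * (element
  // size in bytes).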
1380 unsigned EltSize = 1381 Vec.getValueType().getVectorElementType().getSizeInBits()/8; 1382 1383 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, 1384 DAG.getConstant(EltSize, Idx.getValueType())); 1385 1386 if (Idx.getValueType().bitsGT(TLI.getPointerTy())) 1387 Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx); 1388 else 1389 Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx); 1390 1391 SDValue SubStackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, 1392 StackPtr); 1393 1394 // Store the subvector. 1395 Ch = DAG.getStore(DAG.getEntryNode(), dl, Part, SubStackPtr, 1396 MachinePointerInfo(), false, false, 0); 1397 1398 // Finally, load the updated vector. 1399 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo, 1400 false, false, false, 0); 1401} 1402 1403SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) { 1404 // We can't handle this case efficiently. Allocate a sufficiently 1405 // aligned object on the stack, store each element into it, then load 1406 // the result as a vector. 1407 // Create the stack frame object. 1408 EVT VT = Node->getValueType(0); 1409 EVT EltVT = VT.getVectorElementType(); 1410 DebugLoc dl = Node->getDebugLoc(); 1411 SDValue FIPtr = DAG.CreateStackTemporary(VT); 1412 int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex(); 1413 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); 1414 1415 // Emit a store of each element to the stack slot. 1416 SmallVector<SDValue, 8> Stores; 1417 unsigned TypeByteSize = EltVT.getSizeInBits() / 8; 1418 // Store (in the right endianness) the elements to memory. 1419 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1420 // Ignore undef elements. 1421 if (Node->getOperand(i).getOpcode() == ISD::UNDEF) continue; 1422 1423 unsigned Offset = TypeByteSize*i; 1424 1425 SDValue Idx = DAG.getConstant(Offset, FIPtr.getValueType()); 1426 Idx = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, Idx); 1427 1428 // If the destination vector element type is narrower than the source 1429 // element type, only store the bits necessary. 1430 if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) { 1431 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, 1432 Node->getOperand(i), Idx, 1433 PtrInfo.getWithOffset(Offset), 1434 EltVT, false, false, 0)); 1435 } else 1436 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, 1437 Node->getOperand(i), Idx, 1438 PtrInfo.getWithOffset(Offset), 1439 false, false, 0)); 1440 } 1441 1442 SDValue StoreChain; 1443 if (!Stores.empty()) // Not all undef elements? 1444 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1445 &Stores[0], Stores.size()); 1446 else 1447 StoreChain = DAG.getEntryNode(); 1448 1449 // Result is a load from the stack slot. 1450 return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo, 1451 false, false, false, 0); 1452} 1453 1454SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) { 1455 DebugLoc dl = Node->getDebugLoc(); 1456 SDValue Tmp1 = Node->getOperand(0); 1457 SDValue Tmp2 = Node->getOperand(1); 1458 1459 // Get the sign bit of the RHS. First obtain a value that has the same 1460 // sign as the sign bit, i.e. negative if and only if the sign bit is 1. 1461 SDValue SignBit; 1462 EVT FloatVT = Tmp2.getValueType(); 1463 EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits()); 1464 if (TLI.isTypeLegal(IVT)) { 1465 // Convert to an integer with the same sign bit. 
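    // The bitcast preserves the bit pattern, so the sign bit of the integer
    // is exactly the sign bit of the float.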
1466 SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2); 1467 } else { 1468 // Store the float to memory, then load the sign part out as an integer. 1469 MVT LoadTy = TLI.getPointerTy(); 1470 // First create a temporary that is aligned for both the load and store. 1471 SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy); 1472 // Then store the float to it. 1473 SDValue Ch = 1474 DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, MachinePointerInfo(), 1475 false, false, 0); 1476 if (TLI.isBigEndian()) { 1477 assert(FloatVT.isByteSized() && "Unsupported floating point type!"); 1478 // Load out a legal integer with the same sign bit as the float. 1479 SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(), 1480 false, false, false, 0); 1481 } else { // Little endian 1482 SDValue LoadPtr = StackPtr; 1483 // The float may be wider than the integer we are going to load. Advance 1484 // the pointer so that the loaded integer will contain the sign bit. 1485 unsigned Strides = (FloatVT.getSizeInBits()-1)/LoadTy.getSizeInBits(); 1486 unsigned ByteOffset = (Strides * LoadTy.getSizeInBits()) / 8; 1487 LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(), 1488 LoadPtr, DAG.getIntPtrConstant(ByteOffset)); 1489 // Load a legal integer containing the sign bit. 1490 SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, MachinePointerInfo(), 1491 false, false, false, 0); 1492 // Move the sign bit to the top bit of the loaded integer. 1493 unsigned BitShift = LoadTy.getSizeInBits() - 1494 (FloatVT.getSizeInBits() - 8 * ByteOffset); 1495 assert(BitShift < LoadTy.getSizeInBits() && "Pointer advanced wrong?"); 1496 if (BitShift) 1497 SignBit = DAG.getNode(ISD::SHL, dl, LoadTy, SignBit, 1498 DAG.getConstant(BitShift, 1499 TLI.getShiftAmountTy(SignBit.getValueType()))); 1500 } 1501 } 1502 // Now get the sign bit proper, by seeing whether the value is negative. 1503 SignBit = DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()), 1504 SignBit, DAG.getConstant(0, SignBit.getValueType()), 1505 ISD::SETLT); 1506 // Get the absolute value of the result. 1507 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1); 1508 // Select between the nabs and abs value based on the sign bit of 1509 // the input. 1510 return DAG.getNode(ISD::SELECT, dl, AbsVal.getValueType(), SignBit, 1511 DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal), 1512 AbsVal); 1513} 1514 1515void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node, 1516 SmallVectorImpl<SDValue> &Results) { 1517 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore(); 1518 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and" 1519 " not tell us which reg is the stack pointer!"); 1520 DebugLoc dl = Node->getDebugLoc(); 1521 EVT VT = Node->getValueType(0); 1522 SDValue Tmp1 = SDValue(Node, 0); 1523 SDValue Tmp2 = SDValue(Node, 1); 1524 SDValue Tmp3 = Node->getOperand(2); 1525 SDValue Chain = Tmp1.getOperand(0); 1526 1527 // Chain the dynamic stack allocation so that it doesn't modify the stack 1528 // pointer when other instructions are using the stack. 
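  // The expansion brackets the allocation in a CALLSEQ_START/CALLSEQ_END pair
  // and computes the new stack pointer by hand: copy SP out, round it down
  // when the requested alignment is stricter than the target's stack
  // alignment (the AND with -Align, e.g. -16 masks off the low four bits for
  // a 16-byte request), subtract the size, and copy the result back into SP.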
1529 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true)); 1530 1531 SDValue Size = Tmp2.getOperand(1); 1532 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); 1533 Chain = SP.getValue(1); 1534 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue(); 1535 unsigned StackAlign = TM.getFrameLowering()->getStackAlignment(); 1536 if (Align > StackAlign) 1537 SP = DAG.getNode(ISD::AND, dl, VT, SP, 1538 DAG.getConstant(-(uint64_t)Align, VT)); 1539 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value 1540 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain 1541 1542 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true), 1543 DAG.getIntPtrConstant(0, true), SDValue()); 1544 1545 Results.push_back(Tmp1); 1546 Results.push_back(Tmp2); 1547} 1548 1549/// LegalizeSetCCCondCode - Legalize a SETCC with given LHS and RHS and 1550/// condition code CC on the current target. This routine expands SETCC with 1551/// illegal condition code into AND / OR of multiple SETCC values. 1552void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, 1553 SDValue &LHS, SDValue &RHS, 1554 SDValue &CC, 1555 DebugLoc dl) { 1556 EVT OpVT = LHS.getValueType(); 1557 ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get(); 1558 switch (TLI.getCondCodeAction(CCCode, OpVT)) { 1559 default: llvm_unreachable("Unknown condition code action!"); 1560 case TargetLowering::Legal: 1561 // Nothing to do. 1562 break; 1563 case TargetLowering::Expand: { 1564 ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID; 1565 unsigned Opc = 0; 1566 switch (CCCode) { 1567 default: llvm_unreachable("Don't know how to expand this condition!"); 1568 case ISD::SETOEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETO; Opc = ISD::AND; break; 1569 case ISD::SETOGT: CC1 = ISD::SETGT; CC2 = ISD::SETO; Opc = ISD::AND; break; 1570 case ISD::SETOGE: CC1 = ISD::SETGE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1571 case ISD::SETOLT: CC1 = ISD::SETLT; CC2 = ISD::SETO; Opc = ISD::AND; break; 1572 case ISD::SETOLE: CC1 = ISD::SETLE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1573 case ISD::SETONE: CC1 = ISD::SETNE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1574 case ISD::SETUEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1575 case ISD::SETUGT: CC1 = ISD::SETGT; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1576 case ISD::SETUGE: CC1 = ISD::SETGE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1577 case ISD::SETULT: CC1 = ISD::SETLT; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1578 case ISD::SETULE: CC1 = ISD::SETLE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1579 case ISD::SETUNE: CC1 = ISD::SETNE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1580 // FIXME: Implement more expansions. 1581 } 1582 1583 SDValue SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1); 1584 SDValue SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2); 1585 LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2); 1586 RHS = SDValue(); 1587 CC = SDValue(); 1588 break; 1589 } 1590 } 1591} 1592 1593/// EmitStackConvert - Emit a store/load combination to the stack. This stores 1594/// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does 1595/// a load from the stack slot to DestVT, extending it if needed. 1596/// The resultant code need not be legal. 1597SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, 1598 EVT SlotVT, 1599 EVT DestVT, 1600 DebugLoc dl) { 1601 // Create the stack frame object. 1602 unsigned SrcAlign = 1603 TLI.getTargetData()->getPrefTypeAlignment(SrcOp.getValueType(). 
1604 getTypeForEVT(*DAG.getContext())); 1605 SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign); 1606 1607 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr); 1608 int SPFI = StackPtrFI->getIndex(); 1609 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI); 1610 1611 unsigned SrcSize = SrcOp.getValueType().getSizeInBits(); 1612 unsigned SlotSize = SlotVT.getSizeInBits(); 1613 unsigned DestSize = DestVT.getSizeInBits(); 1614 Type *DestType = DestVT.getTypeForEVT(*DAG.getContext()); 1615 unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(DestType); 1616 1617 // Emit a store to the stack slot. Use a truncstore if the input value is 1618 // later than DestVT. 1619 SDValue Store; 1620 1621 if (SrcSize > SlotSize) 1622 Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, 1623 PtrInfo, SlotVT, false, false, SrcAlign); 1624 else { 1625 assert(SrcSize == SlotSize && "Invalid store"); 1626 Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, 1627 PtrInfo, false, false, SrcAlign); 1628 } 1629 1630 // Result is a load from the stack slot. 1631 if (SlotSize == DestSize) 1632 return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo, 1633 false, false, false, DestAlign); 1634 1635 assert(SlotSize < DestSize && "Unknown extension!"); 1636 return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, 1637 PtrInfo, SlotVT, false, false, DestAlign); 1638} 1639 1640SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) { 1641 DebugLoc dl = Node->getDebugLoc(); 1642 // Create a vector sized/aligned stack slot, store the value to element #0, 1643 // then load the whole vector back out. 1644 SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0)); 1645 1646 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr); 1647 int SPFI = StackPtrFI->getIndex(); 1648 1649 SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0), 1650 StackPtr, 1651 MachinePointerInfo::getFixedStack(SPFI), 1652 Node->getValueType(0).getVectorElementType(), 1653 false, false, 0); 1654 return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr, 1655 MachinePointerInfo::getFixedStack(SPFI), 1656 false, false, false, 0); 1657} 1658 1659 1660/// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't 1661/// support the operation, but do support the resultant vector type. 1662SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { 1663 unsigned NumElems = Node->getNumOperands(); 1664 SDValue Value1, Value2; 1665 DebugLoc dl = Node->getDebugLoc(); 1666 EVT VT = Node->getValueType(0); 1667 EVT OpVT = Node->getOperand(0).getValueType(); 1668 EVT EltVT = VT.getVectorElementType(); 1669 1670 // If the only non-undef value is the low element, turn this into a 1671 // SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X. 
1672 bool isOnlyLowElement = true; 1673 bool MoreThanTwoValues = false; 1674 bool isConstant = true; 1675 for (unsigned i = 0; i < NumElems; ++i) { 1676 SDValue V = Node->getOperand(i); 1677 if (V.getOpcode() == ISD::UNDEF) 1678 continue; 1679 if (i > 0) 1680 isOnlyLowElement = false; 1681 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 1682 isConstant = false; 1683 1684 if (!Value1.getNode()) { 1685 Value1 = V; 1686 } else if (!Value2.getNode()) { 1687 if (V != Value1) 1688 Value2 = V; 1689 } else if (V != Value1 && V != Value2) { 1690 MoreThanTwoValues = true; 1691 } 1692 } 1693 1694 if (!Value1.getNode()) 1695 return DAG.getUNDEF(VT); 1696 1697 if (isOnlyLowElement) 1698 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0)); 1699 1700 // If all elements are constants, create a load from the constant pool. 1701 if (isConstant) { 1702 SmallVector<Constant*, 16> CV; 1703 for (unsigned i = 0, e = NumElems; i != e; ++i) { 1704 if (ConstantFPSDNode *V = 1705 dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) { 1706 CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue())); 1707 } else if (ConstantSDNode *V = 1708 dyn_cast<ConstantSDNode>(Node->getOperand(i))) { 1709 if (OpVT==EltVT) 1710 CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue())); 1711 else { 1712 // If OpVT and EltVT don't match, EltVT is not legal and the 1713 // element values have been promoted/truncated earlier. Undo this; 1714 // we don't want a v16i8 to become a v16i32 for example. 1715 const ConstantInt *CI = V->getConstantIntValue(); 1716 CV.push_back(ConstantInt::get(EltVT.getTypeForEVT(*DAG.getContext()), 1717 CI->getZExtValue())); 1718 } 1719 } else { 1720 assert(Node->getOperand(i).getOpcode() == ISD::UNDEF); 1721 Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext()); 1722 CV.push_back(UndefValue::get(OpNTy)); 1723 } 1724 } 1725 Constant *CP = ConstantVector::get(CV); 1726 SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy()); 1727 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 1728 return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 1729 MachinePointerInfo::getConstantPool(), 1730 false, false, false, Alignment); 1731 } 1732 1733 if (!MoreThanTwoValues) { 1734 SmallVector<int, 8> ShuffleVec(NumElems, -1); 1735 for (unsigned i = 0; i < NumElems; ++i) { 1736 SDValue V = Node->getOperand(i); 1737 if (V.getOpcode() == ISD::UNDEF) 1738 continue; 1739 ShuffleVec[i] = V == Value1 ? 0 : NumElems; 1740 } 1741 if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) { 1742 // Get the splatted value into the low element of a vector register. 1743 SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1); 1744 SDValue Vec2; 1745 if (Value2.getNode()) 1746 Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2); 1747 else 1748 Vec2 = DAG.getUNDEF(VT); 1749 1750 // Return shuffle(LowValVec, undef, <0,0,0,0>) 1751 return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data()); 1752 } 1753 } 1754 1755 // Otherwise, we can't handle this case efficiently. 1756 return ExpandVectorBuildThroughStack(Node); 1757} 1758 1759// ExpandLibCall - Expand a node into a call to a libcall. If the result value 1760// does not fit into a register, return the lo part and set the hi part to the 1761// by-reg argument. If it does fit into a single register, return the result 1762// and leave the Hi part unset. 
1763SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, 1764 bool isSigned) { 1765 // The input chain to this libcall is the entry node of the function. 1766 // Legalizing the call will automatically add the previous call to the 1767 // dependence. 1768 SDValue InChain = DAG.getEntryNode(); 1769 1770 TargetLowering::ArgListTy Args; 1771 TargetLowering::ArgListEntry Entry; 1772 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1773 EVT ArgVT = Node->getOperand(i).getValueType(); 1774 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 1775 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 1776 Entry.isSExt = isSigned; 1777 Entry.isZExt = !isSigned; 1778 Args.push_back(Entry); 1779 } 1780 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1781 TLI.getPointerTy()); 1782 1783 Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 1784 1785 // isTailCall may be true since the callee does not reference caller stack 1786 // frame. Check if it's in the right position. 1787 bool isTailCall = isInTailCallPosition(DAG, Node, TLI); 1788 std::pair<SDValue, SDValue> CallInfo = 1789 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 1790 0, TLI.getLibcallCallingConv(LC), isTailCall, 1791 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, 1792 Callee, Args, DAG, Node->getDebugLoc()); 1793 1794 if (!CallInfo.second.getNode()) 1795 // It's a tailcall, return the chain (which is the DAG root). 1796 return DAG.getRoot(); 1797 1798 return CallInfo.first; 1799} 1800 1801/// ExpandLibCall - Generate a libcall taking the given operands as arguments 1802/// and returning a result of type RetVT. 1803SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, 1804 const SDValue *Ops, unsigned NumOps, 1805 bool isSigned, DebugLoc dl) { 1806 TargetLowering::ArgListTy Args; 1807 Args.reserve(NumOps); 1808 1809 TargetLowering::ArgListEntry Entry; 1810 for (unsigned i = 0; i != NumOps; ++i) { 1811 Entry.Node = Ops[i]; 1812 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext()); 1813 Entry.isSExt = isSigned; 1814 Entry.isZExt = !isSigned; 1815 Args.push_back(Entry); 1816 } 1817 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1818 TLI.getPointerTy()); 1819 1820 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 1821 std::pair<SDValue,SDValue> CallInfo = 1822 TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false, 1823 false, 0, TLI.getLibcallCallingConv(LC), false, 1824 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, 1825 Callee, Args, DAG, dl); 1826 1827 return CallInfo.first; 1828} 1829 1830// ExpandChainLibCall - Expand a node into a call to a libcall. Similar to 1831// ExpandLibCall except that the first operand is the in-chain. 
1832std::pair<SDValue, SDValue> 1833SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC, 1834 SDNode *Node, 1835 bool isSigned) { 1836 SDValue InChain = Node->getOperand(0); 1837 1838 TargetLowering::ArgListTy Args; 1839 TargetLowering::ArgListEntry Entry; 1840 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) { 1841 EVT ArgVT = Node->getOperand(i).getValueType(); 1842 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 1843 Entry.Node = Node->getOperand(i); 1844 Entry.Ty = ArgTy; 1845 Entry.isSExt = isSigned; 1846 Entry.isZExt = !isSigned; 1847 Args.push_back(Entry); 1848 } 1849 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1850 TLI.getPointerTy()); 1851 1852 Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 1853 std::pair<SDValue, SDValue> CallInfo = 1854 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 1855 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false, 1856 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, 1857 Callee, Args, DAG, Node->getDebugLoc()); 1858 1859 return CallInfo; 1860} 1861 1862SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node, 1863 RTLIB::Libcall Call_F32, 1864 RTLIB::Libcall Call_F64, 1865 RTLIB::Libcall Call_F80, 1866 RTLIB::Libcall Call_PPCF128) { 1867 RTLIB::Libcall LC; 1868 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1869 default: llvm_unreachable("Unexpected request for libcall!"); 1870 case MVT::f32: LC = Call_F32; break; 1871 case MVT::f64: LC = Call_F64; break; 1872 case MVT::f80: LC = Call_F80; break; 1873 case MVT::ppcf128: LC = Call_PPCF128; break; 1874 } 1875 return ExpandLibCall(LC, Node, false); 1876} 1877 1878SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned, 1879 RTLIB::Libcall Call_I8, 1880 RTLIB::Libcall Call_I16, 1881 RTLIB::Libcall Call_I32, 1882 RTLIB::Libcall Call_I64, 1883 RTLIB::Libcall Call_I128) { 1884 RTLIB::Libcall LC; 1885 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1886 default: llvm_unreachable("Unexpected request for libcall!"); 1887 case MVT::i8: LC = Call_I8; break; 1888 case MVT::i16: LC = Call_I16; break; 1889 case MVT::i32: LC = Call_I32; break; 1890 case MVT::i64: LC = Call_I64; break; 1891 case MVT::i128: LC = Call_I128; break; 1892 } 1893 return ExpandLibCall(LC, Node, isSigned); 1894} 1895 1896/// isDivRemLibcallAvailable - Return true if divmod libcall is available. 1897static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned, 1898 const TargetLowering &TLI) { 1899 RTLIB::Libcall LC; 1900 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1901 default: llvm_unreachable("Unexpected request for libcall!"); 1902 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 1903 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 1904 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 1905 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 1906 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 1907 } 1908 1909 return TLI.getLibcallName(LC) != 0; 1910} 1911 1912/// UseDivRem - Only issue divrem libcall if both quotient and remainder are 1913/// needed. 1914static bool UseDivRem(SDNode *Node, bool isSigned, bool isDIV) { 1915 unsigned OtherOpcode = 0; 1916 if (isSigned) 1917 OtherOpcode = isDIV ? ISD::SREM : ISD::SDIV; 1918 else 1919 OtherOpcode = isDIV ? 
ISD::UREM : ISD::UDIV; 1920 1921 SDValue Op0 = Node->getOperand(0); 1922 SDValue Op1 = Node->getOperand(1); 1923 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(), 1924 UE = Op0.getNode()->use_end(); UI != UE; ++UI) { 1925 SDNode *User = *UI; 1926 if (User == Node) 1927 continue; 1928 if (User->getOpcode() == OtherOpcode && 1929 User->getOperand(0) == Op0 && 1930 User->getOperand(1) == Op1) 1931 return true; 1932 } 1933 return false; 1934} 1935 1936/// ExpandDivRemLibCall - Issue libcalls to __{u}divmod to compute div / rem 1937/// pairs. 1938void 1939SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node, 1940 SmallVectorImpl<SDValue> &Results) { 1941 unsigned Opcode = Node->getOpcode(); 1942 bool isSigned = Opcode == ISD::SDIVREM; 1943 1944 RTLIB::Libcall LC; 1945 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1946 default: llvm_unreachable("Unexpected request for libcall!"); 1947 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 1948 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 1949 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 1950 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 1951 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 1952 } 1953 1954 // The input chain to this libcall is the entry node of the function. 1955 // Legalizing the call will automatically add the previous call to the 1956 // dependence. 1957 SDValue InChain = DAG.getEntryNode(); 1958 1959 EVT RetVT = Node->getValueType(0); 1960 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 1961 1962 TargetLowering::ArgListTy Args; 1963 TargetLowering::ArgListEntry Entry; 1964 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1965 EVT ArgVT = Node->getOperand(i).getValueType(); 1966 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 1967 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 1968 Entry.isSExt = isSigned; 1969 Entry.isZExt = !isSigned; 1970 Args.push_back(Entry); 1971 } 1972 1973 // Also pass the return address of the remainder. 1974 SDValue FIPtr = DAG.CreateStackTemporary(RetVT); 1975 Entry.Node = FIPtr; 1976 Entry.Ty = RetTy->getPointerTo(); 1977 Entry.isSExt = isSigned; 1978 Entry.isZExt = !isSigned; 1979 Args.push_back(Entry); 1980 1981 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1982 TLI.getPointerTy()); 1983 1984 DebugLoc dl = Node->getDebugLoc(); 1985 std::pair<SDValue, SDValue> CallInfo = 1986 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 1987 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false, 1988 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, 1989 Callee, Args, DAG, dl); 1990 1991 // Remainder is loaded back from the stack frame. 1992 SDValue Rem = DAG.getLoad(RetVT, dl, CallInfo.second, FIPtr, 1993 MachinePointerInfo(), false, false, false, 0); 1994 Results.push_back(CallInfo.first); 1995 Results.push_back(Rem); 1996} 1997 1998/// ExpandLegalINT_TO_FP - This function is responsible for legalizing a 1999/// INT_TO_FP operation of the specified operand when the target requests that 2000/// we expand it. At this point, we know that the result and operand types are 2001/// legal for the target. 
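/// For the common i32 source, the expansion below builds the result directly
/// in a stack slot as the bit pattern (0x43300000 << 32) | uint32(x), i.e.
/// the double 2^52 + x, and then subtracts the 2^52 bias (2^52 + 2^31 in the
/// signed case, where the input is first XORed with 0x80000000). A rough
/// scalar sketch of the unsigned case, for illustration only:
///   double uitofp32(uint32_t x) {            // hypothetical helper
///     uint64_t bits = (uint64_t(0x43300000u) << 32) | x; // 2^52 + x, exact
///     double d;
///     memcpy(&d, &bits, sizeof d);                       // bitcast
///     return d - 4503599627370496.0;                     // subtract 2^52
///   }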
2002SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, 2003 SDValue Op0, 2004 EVT DestVT, 2005 DebugLoc dl) { 2006 if (Op0.getValueType() == MVT::i32) { 2007 // simple 32-bit [signed|unsigned] integer to float/double expansion 2008 2009 // Get the stack frame index of a 8 byte buffer. 2010 SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64); 2011 2012 // word offset constant for Hi/Lo address computation 2013 SDValue WordOff = DAG.getConstant(sizeof(int), TLI.getPointerTy()); 2014 // set up Hi and Lo (into buffer) address based on endian 2015 SDValue Hi = StackSlot; 2016 SDValue Lo = DAG.getNode(ISD::ADD, dl, 2017 TLI.getPointerTy(), StackSlot, WordOff); 2018 if (TLI.isLittleEndian()) 2019 std::swap(Hi, Lo); 2020 2021 // if signed map to unsigned space 2022 SDValue Op0Mapped; 2023 if (isSigned) { 2024 // constant used to invert sign bit (signed to unsigned mapping) 2025 SDValue SignBit = DAG.getConstant(0x80000000u, MVT::i32); 2026 Op0Mapped = DAG.getNode(ISD::XOR, dl, MVT::i32, Op0, SignBit); 2027 } else { 2028 Op0Mapped = Op0; 2029 } 2030 // store the lo of the constructed double - based on integer input 2031 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, 2032 Op0Mapped, Lo, MachinePointerInfo(), 2033 false, false, 0); 2034 // initial hi portion of constructed double 2035 SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32); 2036 // store the hi of the constructed double - biased exponent 2037 SDValue Store2 = DAG.getStore(Store1, dl, InitialHi, Hi, 2038 MachinePointerInfo(), 2039 false, false, 0); 2040 // load the constructed double 2041 SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot, 2042 MachinePointerInfo(), false, false, false, 0); 2043 // FP constant to bias correct the final result 2044 SDValue Bias = DAG.getConstantFP(isSigned ? 2045 BitsToDouble(0x4330000080000000ULL) : 2046 BitsToDouble(0x4330000000000000ULL), 2047 MVT::f64); 2048 // subtract the bias 2049 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias); 2050 // final result 2051 SDValue Result; 2052 // handle final rounding 2053 if (DestVT == MVT::f64) { 2054 // do nothing 2055 Result = Sub; 2056 } else if (DestVT.bitsLT(MVT::f64)) { 2057 Result = DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 2058 DAG.getIntPtrConstant(0)); 2059 } else if (DestVT.bitsGT(MVT::f64)) { 2060 Result = DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 2061 } 2062 return Result; 2063 } 2064 assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet"); 2065 // Code below here assumes !isSigned without checking again. 2066 2067 // Implementation of unsigned i64 to f64 following the algorithm in 2068 // __floatundidf in compiler_rt. This implementation has the advantage 2069 // of performing rounding correctly, both in the default rounding mode 2070 // and in all alternate rounding modes. 2071 // TODO: Generalize this for use with other types. 
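  // Sketch of the trick used below: split the 64-bit input into two 32-bit
  // halves and materialize each half exactly as a double by ORing it into
  // the mantissa of a large power of two:
  //   lo OR'd into 2^52 (0x4330...)  ->  2^52 + lo         (exact)
  //   hi OR'd into 2^84 (0x4530...)  ->  2^84 + hi * 2^32  (exact)
  // Subtracting the constant 2^84 + 2^52 from the high part and adding the
  // low part then yields hi * 2^32 + lo, with rounding confined to that one
  // final FADD.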
2072 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f64) { 2073 SDValue TwoP52 = 2074 DAG.getConstant(UINT64_C(0x4330000000000000), MVT::i64); 2075 SDValue TwoP84PlusTwoP52 = 2076 DAG.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), MVT::f64); 2077 SDValue TwoP84 = 2078 DAG.getConstant(UINT64_C(0x4530000000000000), MVT::i64); 2079 2080 SDValue Lo = DAG.getZeroExtendInReg(Op0, dl, MVT::i32); 2081 SDValue Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, 2082 DAG.getConstant(32, MVT::i64)); 2083 SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52); 2084 SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84); 2085 SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr); 2086 SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr); 2087 SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt, 2088 TwoP84PlusTwoP52); 2089 return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub); 2090 } 2091 2092 // Implementation of unsigned i64 to f32. 2093 // TODO: Generalize this for use with other types. 2094 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) { 2095 // For unsigned conversions, convert them to signed conversions using the 2096 // algorithm from the x86_64 __floatundidf in compiler_rt. 2097 if (!isSigned) { 2098 SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0); 2099 2100 SDValue ShiftConst = 2101 DAG.getConstant(1, TLI.getShiftAmountTy(Op0.getValueType())); 2102 SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst); 2103 SDValue AndConst = DAG.getConstant(1, MVT::i64); 2104 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst); 2105 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr); 2106 2107 SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or); 2108 SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt); 2109 2110 // TODO: This really should be implemented using a branch rather than a 2111 // select. We happen to get lucky and machinesink does the right 2112 // thing most of the time. This would be a good candidate for a 2113 //pseudo-op, or, even better, for whole-function isel. 2114 SDValue SignBitTest = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2115 Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT); 2116 return DAG.getNode(ISD::SELECT, dl, MVT::f32, SignBitTest, Slow, Fast); 2117 } 2118 2119 // Otherwise, implement the fully general conversion. 
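    // The path below avoids the usual double-rounding problem of going
    // i64 -> f64 -> f32 by first collapsing the bits that cannot survive
    // either rounding step into a sticky bit: for inputs >= 2^53 with any of
    // the low 11 bits set, those bits are cleared and bit 11 is forced to 1.
    // The adjusted value is then assembled from its two 32-bit halves as an
    // f64 (hi * 2^32 + lo) and finally rounded to f32.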
2120 2121 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2122 DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64)); 2123 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, 2124 DAG.getConstant(UINT64_C(0x800), MVT::i64)); 2125 SDValue And2 = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2126 DAG.getConstant(UINT64_C(0x7ff), MVT::i64)); 2127 SDValue Ne = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2128 And2, DAG.getConstant(UINT64_C(0), MVT::i64), ISD::SETNE); 2129 SDValue Sel = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ne, Or, Op0); 2130 SDValue Ge = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2131 Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64), 2132 ISD::SETUGE); 2133 SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0); 2134 EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType()); 2135 2136 SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2, 2137 DAG.getConstant(32, SHVT)); 2138 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh); 2139 SDValue Fcvt = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Trunc); 2140 SDValue TwoP32 = 2141 DAG.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), MVT::f64); 2142 SDValue Fmul = DAG.getNode(ISD::FMUL, dl, MVT::f64, TwoP32, Fcvt); 2143 SDValue Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sel2); 2144 SDValue Fcvt2 = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Lo); 2145 SDValue Fadd = DAG.getNode(ISD::FADD, dl, MVT::f64, Fmul, Fcvt2); 2146 return DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Fadd, 2147 DAG.getIntPtrConstant(0)); 2148 } 2149 2150 SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0); 2151 2152 SDValue SignSet = DAG.getSetCC(dl, TLI.getSetCCResultType(Op0.getValueType()), 2153 Op0, DAG.getConstant(0, Op0.getValueType()), 2154 ISD::SETLT); 2155 SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4); 2156 SDValue CstOffset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), 2157 SignSet, Four, Zero); 2158 2159 // If the sign bit of the integer is set, the large number will be treated 2160 // as a negative number. To counteract this, the dynamic code adds an 2161 // offset depending on the data type. 
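  // For example, an i32 input of 0x80000000 comes out of SINT_TO_FP as
  // -2^31; selecting the 2^32 entry from the table below and adding it back
  // yields the intended 2^31. When the sign bit is clear, the selected
  // offset is the +0.0f half of the same constant-pool word, so the result
  // is unchanged.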
2162 uint64_t FF; 2163 switch (Op0.getValueType().getSimpleVT().SimpleTy) { 2164 default: llvm_unreachable("Unsupported integer type!"); 2165 case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float) 2166 case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float) 2167 case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float) 2168 case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float) 2169 } 2170 if (TLI.isLittleEndian()) FF <<= 32; 2171 Constant *FudgeFactor = ConstantInt::get( 2172 Type::getInt64Ty(*DAG.getContext()), FF); 2173 2174 SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy()); 2175 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 2176 CPIdx = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), CPIdx, CstOffset); 2177 Alignment = std::min(Alignment, 4u); 2178 SDValue FudgeInReg; 2179 if (DestVT == MVT::f32) 2180 FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx, 2181 MachinePointerInfo::getConstantPool(), 2182 false, false, false, Alignment); 2183 else { 2184 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, 2185 DAG.getEntryNode(), CPIdx, 2186 MachinePointerInfo::getConstantPool(), 2187 MVT::f32, false, false, Alignment); 2188 HandleSDNode Handle(Load); 2189 LegalizeOp(Load.getNode()); 2190 FudgeInReg = Handle.getValue(); 2191 } 2192 2193 return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg); 2194} 2195 2196/// PromoteLegalINT_TO_FP - This function is responsible for legalizing a 2197/// *INT_TO_FP operation of the specified operand when the target requests that 2198/// we promote it. At this point, we know that the result and operand types are 2199/// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP 2200/// operation that takes a larger input. 2201SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp, 2202 EVT DestVT, 2203 bool isSigned, 2204 DebugLoc dl) { 2205 // First step, figure out the appropriate *INT_TO_FP operation to use. 2206 EVT NewInTy = LegalOp.getValueType(); 2207 2208 unsigned OpToUse = 0; 2209 2210 // Scan for the appropriate larger type to use. 2211 while (1) { 2212 NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1); 2213 assert(NewInTy.isInteger() && "Ran out of possibilities!"); 2214 2215 // If the target supports SINT_TO_FP of this type, use it. 2216 if (TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, NewInTy)) { 2217 OpToUse = ISD::SINT_TO_FP; 2218 break; 2219 } 2220 if (isSigned) continue; 2221 2222 // If the target supports UINT_TO_FP of this type, use it. 2223 if (TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, NewInTy)) { 2224 OpToUse = ISD::UINT_TO_FP; 2225 break; 2226 } 2227 2228 // Otherwise, try a larger type. 2229 } 2230 2231 // Okay, we found the operation and type to use. Zero extend our input to the 2232 // desired type then run the operation on it. 2233 return DAG.getNode(OpToUse, dl, DestVT, 2234 DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, 2235 dl, NewInTy, LegalOp)); 2236} 2237 2238/// PromoteLegalFP_TO_INT - This function is responsible for legalizing a 2239/// FP_TO_*INT operation of the specified operand when the target requests that 2240/// we promote it. At this point, we know that the result and operand types are 2241/// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT 2242/// operation that returns a larger result. 
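/// For example, on a target where only FP_TO_SINT to i32 is legal, an
/// f32 -> i16 FP_TO_UINT is emitted as (trunc i16 (fp_to_sint i32 x)).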
2243SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp, 2244 EVT DestVT, 2245 bool isSigned, 2246 DebugLoc dl) { 2247 // First step, figure out the appropriate FP_TO*INT operation to use. 2248 EVT NewOutTy = DestVT; 2249 2250 unsigned OpToUse = 0; 2251 2252 // Scan for the appropriate larger type to use. 2253 while (1) { 2254 NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1); 2255 assert(NewOutTy.isInteger() && "Ran out of possibilities!"); 2256 2257 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) { 2258 OpToUse = ISD::FP_TO_SINT; 2259 break; 2260 } 2261 2262 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) { 2263 OpToUse = ISD::FP_TO_UINT; 2264 break; 2265 } 2266 2267 // Otherwise, try a larger type. 2268 } 2269 2270 2271 // Okay, we found the operation and type to use. 2272 SDValue Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp); 2273 2274 // Truncate the result of the extended FP_TO_*INT operation to the desired 2275 // size. 2276 return DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation); 2277} 2278 2279/// ExpandBSWAP - Open code the operations for BSWAP of the specified operation. 2280/// 2281SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) { 2282 EVT VT = Op.getValueType(); 2283 EVT SHVT = TLI.getShiftAmountTy(VT); 2284 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; 2285 switch (VT.getSimpleVT().SimpleTy) { 2286 default: llvm_unreachable("Unhandled Expand type in BSWAP!"); 2287 case MVT::i16: 2288 Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2289 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2290 return DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2291 case MVT::i32: 2292 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2293 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2294 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2295 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2296 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(0xFF0000, VT)); 2297 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, VT)); 2298 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2299 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2300 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 2301 case MVT::i64: 2302 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2303 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2304 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2305 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2306 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2307 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2308 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2309 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2310 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, DAG.getConstant(255ULL<<48, VT)); 2311 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, DAG.getConstant(255ULL<<40, VT)); 2312 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, DAG.getConstant(255ULL<<32, VT)); 2313 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, DAG.getConstant(255ULL<<24, VT)); 2314 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(255ULL<<16, VT)); 2315 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(255ULL<<8 , VT)); 2316 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7); 2317 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, 
Tmp5); 2318 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2319 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2320 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6); 2321 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 2322 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4); 2323 } 2324} 2325 2326/// SplatByte - Distribute ByteVal over NumBits bits. 2327// FIXME: Move this helper to a common place. 2328static APInt SplatByte(unsigned NumBits, uint8_t ByteVal) { 2329 APInt Val = APInt(NumBits, ByteVal); 2330 unsigned Shift = 8; 2331 for (unsigned i = NumBits; i > 8; i >>= 1) { 2332 Val = (Val << Shift) | Val; 2333 Shift <<= 1; 2334 } 2335 return Val; 2336} 2337 2338/// ExpandBitCount - Expand the specified bitcount instruction into operations. 2339/// 2340SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op, 2341 DebugLoc dl) { 2342 switch (Opc) { 2343 default: llvm_unreachable("Cannot expand this yet!"); 2344 case ISD::CTPOP: { 2345 EVT VT = Op.getValueType(); 2346 EVT ShVT = TLI.getShiftAmountTy(VT); 2347 unsigned Len = VT.getSizeInBits(); 2348 2349 assert(VT.isInteger() && Len <= 128 && Len % 8 == 0 && 2350 "CTPOP not implemented for this type."); 2351 2352 // This is the "best" algorithm from 2353 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 2354 2355 SDValue Mask55 = DAG.getConstant(SplatByte(Len, 0x55), VT); 2356 SDValue Mask33 = DAG.getConstant(SplatByte(Len, 0x33), VT); 2357 SDValue Mask0F = DAG.getConstant(SplatByte(Len, 0x0F), VT); 2358 SDValue Mask01 = DAG.getConstant(SplatByte(Len, 0x01), VT); 2359 2360 // v = v - ((v >> 1) & 0x55555555...) 2361 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 2362 DAG.getNode(ISD::AND, dl, VT, 2363 DAG.getNode(ISD::SRL, dl, VT, Op, 2364 DAG.getConstant(1, ShVT)), 2365 Mask55)); 2366 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 2367 Op = DAG.getNode(ISD::ADD, dl, VT, 2368 DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 2369 DAG.getNode(ISD::AND, dl, VT, 2370 DAG.getNode(ISD::SRL, dl, VT, Op, 2371 DAG.getConstant(2, ShVT)), 2372 Mask33)); 2373 // v = (v + (v >> 4)) & 0x0F0F0F0F... 2374 Op = DAG.getNode(ISD::AND, dl, VT, 2375 DAG.getNode(ISD::ADD, dl, VT, Op, 2376 DAG.getNode(ISD::SRL, dl, VT, Op, 2377 DAG.getConstant(4, ShVT))), 2378 Mask0F); 2379 // v = (v * 0x01010101...) >> (Len - 8) 2380 Op = DAG.getNode(ISD::SRL, dl, VT, 2381 DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 2382 DAG.getConstant(Len - 8, ShVT)); 2383 2384 return Op; 2385 } 2386 case ISD::CTLZ_ZERO_UNDEF: 2387 // This trivially expands to CTLZ. 2388 return DAG.getNode(ISD::CTLZ, dl, Op.getValueType(), Op); 2389 case ISD::CTLZ: { 2390 // for now, we do this: 2391 // x = x | (x >> 1); 2392 // x = x | (x >> 2); 2393 // ... 2394 // x = x | (x >>16); 2395 // x = x | (x >>32); // for 64-bit input 2396 // return popcount(~x); 2397 // 2398 // but see also: http://www.hackersdelight.org/HDcode/nlz.cc 2399 EVT VT = Op.getValueType(); 2400 EVT ShVT = TLI.getShiftAmountTy(VT); 2401 unsigned len = VT.getSizeInBits(); 2402 for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { 2403 SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT); 2404 Op = DAG.getNode(ISD::OR, dl, VT, Op, 2405 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3)); 2406 } 2407 Op = DAG.getNOT(dl, Op, VT); 2408 return DAG.getNode(ISD::CTPOP, dl, VT, Op); 2409 } 2410 case ISD::CTTZ_ZERO_UNDEF: 2411 // This trivially expands to CTTZ. 
2412 return DAG.getNode(ISD::CTTZ, dl, Op.getValueType(), Op); 2413 case ISD::CTTZ: { 2414 // for now, we use: { return popcount(~x & (x - 1)); } 2415 // unless the target has ctlz but not ctpop, in which case we use: 2416 // { return 32 - nlz(~x & (x-1)); } 2417 // see also http://www.hackersdelight.org/HDcode/ntz.cc 2418 EVT VT = Op.getValueType(); 2419 SDValue Tmp3 = DAG.getNode(ISD::AND, dl, VT, 2420 DAG.getNOT(dl, Op, VT), 2421 DAG.getNode(ISD::SUB, dl, VT, Op, 2422 DAG.getConstant(1, VT))); 2423 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 2424 if (!TLI.isOperationLegalOrCustom(ISD::CTPOP, VT) && 2425 TLI.isOperationLegalOrCustom(ISD::CTLZ, VT)) 2426 return DAG.getNode(ISD::SUB, dl, VT, 2427 DAG.getConstant(VT.getSizeInBits(), VT), 2428 DAG.getNode(ISD::CTLZ, dl, VT, Tmp3)); 2429 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp3); 2430 } 2431 } 2432} 2433 2434std::pair <SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) { 2435 unsigned Opc = Node->getOpcode(); 2436 MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT(); 2437 RTLIB::Libcall LC; 2438 2439 switch (Opc) { 2440 default: 2441 llvm_unreachable("Unhandled atomic intrinsic Expand!"); 2442 case ISD::ATOMIC_SWAP: 2443 switch (VT.SimpleTy) { 2444 default: llvm_unreachable("Unexpected value type for atomic!"); 2445 case MVT::i8: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_1; break; 2446 case MVT::i16: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_2; break; 2447 case MVT::i32: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_4; break; 2448 case MVT::i64: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_8; break; 2449 } 2450 break; 2451 case ISD::ATOMIC_CMP_SWAP: 2452 switch (VT.SimpleTy) { 2453 default: llvm_unreachable("Unexpected value type for atomic!"); 2454 case MVT::i8: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1; break; 2455 case MVT::i16: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2; break; 2456 case MVT::i32: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4; break; 2457 case MVT::i64: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8; break; 2458 } 2459 break; 2460 case ISD::ATOMIC_LOAD_ADD: 2461 switch (VT.SimpleTy) { 2462 default: llvm_unreachable("Unexpected value type for atomic!"); 2463 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_ADD_1; break; 2464 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_ADD_2; break; 2465 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_ADD_4; break; 2466 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_ADD_8; break; 2467 } 2468 break; 2469 case ISD::ATOMIC_LOAD_SUB: 2470 switch (VT.SimpleTy) { 2471 default: llvm_unreachable("Unexpected value type for atomic!"); 2472 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_SUB_1; break; 2473 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_SUB_2; break; 2474 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_SUB_4; break; 2475 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_SUB_8; break; 2476 } 2477 break; 2478 case ISD::ATOMIC_LOAD_AND: 2479 switch (VT.SimpleTy) { 2480 default: llvm_unreachable("Unexpected value type for atomic!"); 2481 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_AND_1; break; 2482 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_AND_2; break; 2483 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_AND_4; break; 2484 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_AND_8; break; 2485 } 2486 break; 2487 case ISD::ATOMIC_LOAD_OR: 2488 switch (VT.SimpleTy) { 2489 default: llvm_unreachable("Unexpected value type for atomic!"); 2490 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_OR_1; break; 2491 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_OR_2; break; 2492 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_OR_4; break; 2493 case MVT::i64: LC = 
RTLIB::SYNC_FETCH_AND_OR_8; break; 2494 } 2495 break; 2496 case ISD::ATOMIC_LOAD_XOR: 2497 switch (VT.SimpleTy) { 2498 default: llvm_unreachable("Unexpected value type for atomic!"); 2499 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_XOR_1; break; 2500 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_XOR_2; break; 2501 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_XOR_4; break; 2502 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_XOR_8; break; 2503 } 2504 break; 2505 case ISD::ATOMIC_LOAD_NAND: 2506 switch (VT.SimpleTy) { 2507 default: llvm_unreachable("Unexpected value type for atomic!"); 2508 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_NAND_1; break; 2509 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_NAND_2; break; 2510 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_NAND_4; break; 2511 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_NAND_8; break; 2512 } 2513 break; 2514 } 2515 2516 return ExpandChainLibCall(LC, Node, false); 2517} 2518 2519void SelectionDAGLegalize::ExpandNode(SDNode *Node) { 2520 SmallVector<SDValue, 8> Results; 2521 DebugLoc dl = Node->getDebugLoc(); 2522 SDValue Tmp1, Tmp2, Tmp3, Tmp4; 2523 switch (Node->getOpcode()) { 2524 case ISD::CTPOP: 2525 case ISD::CTLZ: 2526 case ISD::CTLZ_ZERO_UNDEF: 2527 case ISD::CTTZ: 2528 case ISD::CTTZ_ZERO_UNDEF: 2529 Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl); 2530 Results.push_back(Tmp1); 2531 break; 2532 case ISD::BSWAP: 2533 Results.push_back(ExpandBSWAP(Node->getOperand(0), dl)); 2534 break; 2535 case ISD::FRAMEADDR: 2536 case ISD::RETURNADDR: 2537 case ISD::FRAME_TO_ARGS_OFFSET: 2538 Results.push_back(DAG.getConstant(0, Node->getValueType(0))); 2539 break; 2540 case ISD::FLT_ROUNDS_: 2541 Results.push_back(DAG.getConstant(1, Node->getValueType(0))); 2542 break; 2543 case ISD::EH_RETURN: 2544 case ISD::EH_LABEL: 2545 case ISD::PREFETCH: 2546 case ISD::VAEND: 2547 case ISD::EH_SJLJ_LONGJMP: 2548 // If the target didn't expand these, there's nothing to do, so just 2549 // preserve the chain and be done. 2550 Results.push_back(Node->getOperand(0)); 2551 break; 2552 case ISD::EH_SJLJ_SETJMP: 2553 // If the target didn't expand this, just return 'zero' and preserve the 2554 // chain. 2555 Results.push_back(DAG.getConstant(0, MVT::i32)); 2556 Results.push_back(Node->getOperand(0)); 2557 break; 2558 case ISD::ATOMIC_FENCE: 2559 case ISD::MEMBARRIER: { 2560 // If the target didn't lower this, lower it to '__sync_synchronize()' call 2561 // FIXME: handle "fence singlethread" more efficiently. 2562 TargetLowering::ArgListTy Args; 2563 std::pair<SDValue, SDValue> CallResult = 2564 TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()), 2565 false, false, false, false, 0, CallingConv::C, 2566 /*isTailCall=*/false, 2567 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, 2568 DAG.getExternalSymbol("__sync_synchronize", 2569 TLI.getPointerTy()), 2570 Args, DAG, dl); 2571 Results.push_back(CallResult.second); 2572 break; 2573 } 2574 case ISD::ATOMIC_LOAD: { 2575 // There is no libcall for atomic load; fake it with ATOMIC_CMP_SWAP. 
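    // A compare-and-swap with both the comparison value and the replacement
    // value equal to zero never modifies memory (it either fails to match or
    // "replaces" a zero with zero), yet it still returns the value that was
    // read, which is exactly the semantics of an atomic load.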
2576 SDValue Zero = DAG.getConstant(0, Node->getValueType(0)); 2577 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, 2578 cast<AtomicSDNode>(Node)->getMemoryVT(), 2579 Node->getOperand(0), 2580 Node->getOperand(1), Zero, Zero, 2581 cast<AtomicSDNode>(Node)->getMemOperand(), 2582 cast<AtomicSDNode>(Node)->getOrdering(), 2583 cast<AtomicSDNode>(Node)->getSynchScope()); 2584 Results.push_back(Swap.getValue(0)); 2585 Results.push_back(Swap.getValue(1)); 2586 break; 2587 } 2588 case ISD::ATOMIC_STORE: { 2589 // There is no libcall for atomic store; fake it with ATOMIC_SWAP. 2590 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, 2591 cast<AtomicSDNode>(Node)->getMemoryVT(), 2592 Node->getOperand(0), 2593 Node->getOperand(1), Node->getOperand(2), 2594 cast<AtomicSDNode>(Node)->getMemOperand(), 2595 cast<AtomicSDNode>(Node)->getOrdering(), 2596 cast<AtomicSDNode>(Node)->getSynchScope()); 2597 Results.push_back(Swap.getValue(1)); 2598 break; 2599 } 2600 // By default, atomic intrinsics are marked Legal and lowered. Targets 2601 // which don't support them directly, however, may want libcalls, in which 2602 // case they mark them Expand, and we get here. 2603 case ISD::ATOMIC_SWAP: 2604 case ISD::ATOMIC_LOAD_ADD: 2605 case ISD::ATOMIC_LOAD_SUB: 2606 case ISD::ATOMIC_LOAD_AND: 2607 case ISD::ATOMIC_LOAD_OR: 2608 case ISD::ATOMIC_LOAD_XOR: 2609 case ISD::ATOMIC_LOAD_NAND: 2610 case ISD::ATOMIC_LOAD_MIN: 2611 case ISD::ATOMIC_LOAD_MAX: 2612 case ISD::ATOMIC_LOAD_UMIN: 2613 case ISD::ATOMIC_LOAD_UMAX: 2614 case ISD::ATOMIC_CMP_SWAP: { 2615 std::pair<SDValue, SDValue> Tmp = ExpandAtomic(Node); 2616 Results.push_back(Tmp.first); 2617 Results.push_back(Tmp.second); 2618 break; 2619 } 2620 case ISD::DYNAMIC_STACKALLOC: 2621 ExpandDYNAMIC_STACKALLOC(Node, Results); 2622 break; 2623 case ISD::MERGE_VALUES: 2624 for (unsigned i = 0; i < Node->getNumValues(); i++) 2625 Results.push_back(Node->getOperand(i)); 2626 break; 2627 case ISD::UNDEF: { 2628 EVT VT = Node->getValueType(0); 2629 if (VT.isInteger()) 2630 Results.push_back(DAG.getConstant(0, VT)); 2631 else { 2632 assert(VT.isFloatingPoint() && "Unknown value type!"); 2633 Results.push_back(DAG.getConstantFP(0, VT)); 2634 } 2635 break; 2636 } 2637 case ISD::TRAP: { 2638 // If this operation is not supported, lower it to 'abort()' call 2639 TargetLowering::ArgListTy Args; 2640 std::pair<SDValue, SDValue> CallResult = 2641 TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()), 2642 false, false, false, false, 0, CallingConv::C, 2643 /*isTailCall=*/false, 2644 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, 2645 DAG.getExternalSymbol("abort", TLI.getPointerTy()), 2646 Args, DAG, dl); 2647 Results.push_back(CallResult.second); 2648 break; 2649 } 2650 case ISD::FP_ROUND: 2651 case ISD::BITCAST: 2652 Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0), 2653 Node->getValueType(0), dl); 2654 Results.push_back(Tmp1); 2655 break; 2656 case ISD::FP_EXTEND: 2657 Tmp1 = EmitStackConvert(Node->getOperand(0), 2658 Node->getOperand(0).getValueType(), 2659 Node->getValueType(0), dl); 2660 Results.push_back(Tmp1); 2661 break; 2662 case ISD::SIGN_EXTEND_INREG: { 2663 // NOTE: we could fall back on load/store here too for targets without 2664 // SAR. However, it is doubtful that any exist. 
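    // The expansion is the usual shift pair: e.g. sign-extending the low 8
    // bits of an i32 becomes (i32 (sra (shl x, 24), 24)).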
2665 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 2666 EVT VT = Node->getValueType(0); 2667 EVT ShiftAmountTy = TLI.getShiftAmountTy(VT); 2668 if (VT.isVector()) 2669 ShiftAmountTy = VT; 2670 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 2671 ExtraVT.getScalarType().getSizeInBits(); 2672 SDValue ShiftCst = DAG.getConstant(BitsDiff, ShiftAmountTy); 2673 Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0), 2674 Node->getOperand(0), ShiftCst); 2675 Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst); 2676 Results.push_back(Tmp1); 2677 break; 2678 } 2679 case ISD::FP_ROUND_INREG: { 2680 // The only way we can lower this is to turn it into a TRUNCSTORE, 2681 // EXTLOAD pair, targeting a temporary location (a stack slot). 2682 2683 // NOTE: there is a choice here between constantly creating new stack 2684 // slots and always reusing the same one. We currently always create 2685 // new ones, as reuse may inhibit scheduling. 2686 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 2687 Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT, 2688 Node->getValueType(0), dl); 2689 Results.push_back(Tmp1); 2690 break; 2691 } 2692 case ISD::SINT_TO_FP: 2693 case ISD::UINT_TO_FP: 2694 Tmp1 = ExpandLegalINT_TO_FP(Node->getOpcode() == ISD::SINT_TO_FP, 2695 Node->getOperand(0), Node->getValueType(0), dl); 2696 Results.push_back(Tmp1); 2697 break; 2698 case ISD::FP_TO_UINT: { 2699 SDValue True, False; 2700 EVT VT = Node->getOperand(0).getValueType(); 2701 EVT NVT = Node->getValueType(0); 2702 APFloat apf(APInt::getNullValue(VT.getSizeInBits())); 2703 APInt x = APInt::getSignBit(NVT.getSizeInBits()); 2704 (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven); 2705 Tmp1 = DAG.getConstantFP(apf, VT); 2706 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), 2707 Node->getOperand(0), 2708 Tmp1, ISD::SETLT); 2709 True = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, Node->getOperand(0)); 2710 False = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, 2711 DAG.getNode(ISD::FSUB, dl, VT, 2712 Node->getOperand(0), Tmp1)); 2713 False = DAG.getNode(ISD::XOR, dl, NVT, False, 2714 DAG.getConstant(x, NVT)); 2715 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, True, False); 2716 Results.push_back(Tmp1); 2717 break; 2718 } 2719 case ISD::VAARG: { 2720 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2721 EVT VT = Node->getValueType(0); 2722 Tmp1 = Node->getOperand(0); 2723 Tmp2 = Node->getOperand(1); 2724 unsigned Align = Node->getConstantOperandVal(3); 2725 2726 SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, 2727 MachinePointerInfo(V), 2728 false, false, false, 0); 2729 SDValue VAList = VAListLoad; 2730 2731 if (Align > TLI.getMinStackArgumentAlignment()) { 2732 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); 2733 2734 VAList = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList, 2735 DAG.getConstant(Align - 1, 2736 TLI.getPointerTy())); 2737 2738 VAList = DAG.getNode(ISD::AND, dl, TLI.getPointerTy(), VAList, 2739 DAG.getConstant(-(int64_t)Align, 2740 TLI.getPointerTy())); 2741 } 2742 2743 // Increment the pointer, VAList, to the next vaarg 2744 Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList, 2745 DAG.getConstant(TLI.getTargetData()-> 2746 getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())), 2747 TLI.getPointerTy())); 2748 // Store the incremented VAList to the legalized pointer 2749 Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2, 2750 MachinePointerInfo(V), false, false, 0); 
2751 // Load the actual argument out of the pointer VAList 2752 Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(), 2753 false, false, false, 0)); 2754 Results.push_back(Results[0].getValue(1)); 2755 break; 2756 } 2757 case ISD::VACOPY: { 2758 // This defaults to loading a pointer from the input and storing it to the 2759 // output, returning the chain. 2760 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 2761 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 2762 Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0), 2763 Node->getOperand(2), MachinePointerInfo(VS), 2764 false, false, false, 0); 2765 Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 2766 MachinePointerInfo(VD), false, false, 0); 2767 Results.push_back(Tmp1); 2768 break; 2769 } 2770 case ISD::EXTRACT_VECTOR_ELT: 2771 if (Node->getOperand(0).getValueType().getVectorNumElements() == 1) 2772 // This must be an access of the only element. Return it. 2773 Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), 2774 Node->getOperand(0)); 2775 else 2776 Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0)); 2777 Results.push_back(Tmp1); 2778 break; 2779 case ISD::EXTRACT_SUBVECTOR: 2780 Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0))); 2781 break; 2782 case ISD::INSERT_SUBVECTOR: 2783 Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0))); 2784 break; 2785 case ISD::CONCAT_VECTORS: { 2786 Results.push_back(ExpandVectorBuildThroughStack(Node)); 2787 break; 2788 } 2789 case ISD::SCALAR_TO_VECTOR: 2790 Results.push_back(ExpandSCALAR_TO_VECTOR(Node)); 2791 break; 2792 case ISD::INSERT_VECTOR_ELT: 2793 Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0), 2794 Node->getOperand(1), 2795 Node->getOperand(2), dl)); 2796 break; 2797 case ISD::VECTOR_SHUFFLE: { 2798 SmallVector<int, 32> NewMask; 2799 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask(); 2800 2801 EVT VT = Node->getValueType(0); 2802 EVT EltVT = VT.getVectorElementType(); 2803 SDValue Op0 = Node->getOperand(0); 2804 SDValue Op1 = Node->getOperand(1); 2805 if (!TLI.isTypeLegal(EltVT)) { 2806 2807 EVT NewEltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT); 2808 2809 // BUILD_VECTOR operands are allowed to be wider than the element type. 2810 // But if NewEltVT is smaller that EltVT the BUILD_VECTOR does not accept it 2811 if (NewEltVT.bitsLT(EltVT)) { 2812 2813 // Convert shuffle node. 2814 // If original node was v4i64 and the new EltVT is i32, 2815 // cast operands to v8i32 and re-build the mask. 2816 2817 // Calculate new VT, the size of the new VT should be equal to original. 
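      // For that v4i64 -> v8i32 example the factor is 2, so each mask entry
      // m becomes the pair {2*m, 2*m+1} (undef entries are simply
      // duplicated): e.g. mask <0, 3, 1, -1> becomes
      // <0, 1, 6, 7, 2, 3, -1, -1>.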
2818 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltVT, 2819 VT.getSizeInBits()/NewEltVT.getSizeInBits()); 2820 assert(NewVT.bitsEq(VT)); 2821 2822 // cast operands to new VT 2823 Op0 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op0); 2824 Op1 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op1); 2825 2826 // Convert the shuffle mask 2827 unsigned int factor = NewVT.getVectorNumElements()/VT.getVectorNumElements(); 2828 2829 // EltVT gets smaller 2830 assert(factor > 0); 2831 2832 for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { 2833 if (Mask[i] < 0) { 2834 for (unsigned fi = 0; fi < factor; ++fi) 2835 NewMask.push_back(Mask[i]); 2836 } 2837 else { 2838 for (unsigned fi = 0; fi < factor; ++fi) 2839 NewMask.push_back(Mask[i]*factor+fi); 2840 } 2841 } 2842 Mask = NewMask; 2843 VT = NewVT; 2844 } 2845 EltVT = NewEltVT; 2846 } 2847 unsigned NumElems = VT.getVectorNumElements(); 2848 SmallVector<SDValue, 16> Ops; 2849 for (unsigned i = 0; i != NumElems; ++i) { 2850 if (Mask[i] < 0) { 2851 Ops.push_back(DAG.getUNDEF(EltVT)); 2852 continue; 2853 } 2854 unsigned Idx = Mask[i]; 2855 if (Idx < NumElems) 2856 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 2857 Op0, 2858 DAG.getIntPtrConstant(Idx))); 2859 else 2860 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 2861 Op1, 2862 DAG.getIntPtrConstant(Idx - NumElems))); 2863 } 2864 2865 Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size()); 2866 // We may have changed the BUILD_VECTOR type. Cast it back to the Node type. 2867 Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), Tmp1); 2868 Results.push_back(Tmp1); 2869 break; 2870 } 2871 case ISD::EXTRACT_ELEMENT: { 2872 EVT OpTy = Node->getOperand(0).getValueType(); 2873 if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) { 2874 // 1 -> Hi 2875 Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0), 2876 DAG.getConstant(OpTy.getSizeInBits()/2, 2877 TLI.getShiftAmountTy(Node->getOperand(0).getValueType()))); 2878 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1); 2879 } else { 2880 // 0 -> Lo 2881 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), 2882 Node->getOperand(0)); 2883 } 2884 Results.push_back(Tmp1); 2885 break; 2886 } 2887 case ISD::STACKSAVE: 2888 // Expand to CopyFromReg if the target set 2889 // StackPointerRegisterToSaveRestore. 2890 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) { 2891 Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP, 2892 Node->getValueType(0))); 2893 Results.push_back(Results[0].getValue(1)); 2894 } else { 2895 Results.push_back(DAG.getUNDEF(Node->getValueType(0))); 2896 Results.push_back(Node->getOperand(0)); 2897 } 2898 break; 2899 case ISD::STACKRESTORE: 2900 // Expand to CopyToReg if the target set 2901 // StackPointerRegisterToSaveRestore. 2902 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) { 2903 Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP, 2904 Node->getOperand(1))); 2905 } else { 2906 Results.push_back(Node->getOperand(0)); 2907 } 2908 break; 2909 case ISD::FCOPYSIGN: 2910 Results.push_back(ExpandFCOPYSIGN(Node)); 2911 break; 2912 case ISD::FNEG: 2913 // Expand Y = FNEG(X) -> Y = SUB -0.0, X 2914 Tmp1 = DAG.getConstantFP(-0.0, Node->getValueType(0)); 2915 Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1, 2916 Node->getOperand(0)); 2917 Results.push_back(Tmp1); 2918 break; 2919 case ISD::FABS: { 2920 // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X). 
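    // Note that -0.0 is not "unordered or greater than" +0.0, so it takes
    // the FNEG branch of the select below and is canonicalized to +0.0.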
2921 EVT VT = Node->getValueType(0); 2922 Tmp1 = Node->getOperand(0); 2923 Tmp2 = DAG.getConstantFP(0.0, VT); 2924 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(Tmp1.getValueType()), 2925 Tmp1, Tmp2, ISD::SETUGT); 2926 Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1); 2927 Tmp1 = DAG.getNode(ISD::SELECT, dl, VT, Tmp2, Tmp1, Tmp3); 2928 Results.push_back(Tmp1); 2929 break; 2930 } 2931 case ISD::FSQRT: 2932 Results.push_back(ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64, 2933 RTLIB::SQRT_F80, RTLIB::SQRT_PPCF128)); 2934 break; 2935 case ISD::FSIN: 2936 Results.push_back(ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64, 2937 RTLIB::SIN_F80, RTLIB::SIN_PPCF128)); 2938 break; 2939 case ISD::FCOS: 2940 Results.push_back(ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64, 2941 RTLIB::COS_F80, RTLIB::COS_PPCF128)); 2942 break; 2943 case ISD::FLOG: 2944 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64, 2945 RTLIB::LOG_F80, RTLIB::LOG_PPCF128)); 2946 break; 2947 case ISD::FLOG2: 2948 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64, 2949 RTLIB::LOG2_F80, RTLIB::LOG2_PPCF128)); 2950 break; 2951 case ISD::FLOG10: 2952 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64, 2953 RTLIB::LOG10_F80, RTLIB::LOG10_PPCF128)); 2954 break; 2955 case ISD::FEXP: 2956 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64, 2957 RTLIB::EXP_F80, RTLIB::EXP_PPCF128)); 2958 break; 2959 case ISD::FEXP2: 2960 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64, 2961 RTLIB::EXP2_F80, RTLIB::EXP2_PPCF128)); 2962 break; 2963 case ISD::FTRUNC: 2964 Results.push_back(ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64, 2965 RTLIB::TRUNC_F80, RTLIB::TRUNC_PPCF128)); 2966 break; 2967 case ISD::FFLOOR: 2968 Results.push_back(ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64, 2969 RTLIB::FLOOR_F80, RTLIB::FLOOR_PPCF128)); 2970 break; 2971 case ISD::FCEIL: 2972 Results.push_back(ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64, 2973 RTLIB::CEIL_F80, RTLIB::CEIL_PPCF128)); 2974 break; 2975 case ISD::FRINT: 2976 Results.push_back(ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64, 2977 RTLIB::RINT_F80, RTLIB::RINT_PPCF128)); 2978 break; 2979 case ISD::FNEARBYINT: 2980 Results.push_back(ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32, 2981 RTLIB::NEARBYINT_F64, 2982 RTLIB::NEARBYINT_F80, 2983 RTLIB::NEARBYINT_PPCF128)); 2984 break; 2985 case ISD::FPOWI: 2986 Results.push_back(ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64, 2987 RTLIB::POWI_F80, RTLIB::POWI_PPCF128)); 2988 break; 2989 case ISD::FPOW: 2990 Results.push_back(ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64, 2991 RTLIB::POW_F80, RTLIB::POW_PPCF128)); 2992 break; 2993 case ISD::FDIV: 2994 Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64, 2995 RTLIB::DIV_F80, RTLIB::DIV_PPCF128)); 2996 break; 2997 case ISD::FREM: 2998 Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64, 2999 RTLIB::REM_F80, RTLIB::REM_PPCF128)); 3000 break; 3001 case ISD::FMA: 3002 Results.push_back(ExpandFPLibCall(Node, RTLIB::FMA_F32, RTLIB::FMA_F64, 3003 RTLIB::FMA_F80, RTLIB::FMA_PPCF128)); 3004 break; 3005 case ISD::FP16_TO_FP32: 3006 Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false)); 3007 break; 3008 case ISD::FP32_TO_FP16: 3009 Results.push_back(ExpandLibCall(RTLIB::FPROUND_F32_F16, Node, false)); 3010 break; 3011 case ISD::ConstantFP: { 3012 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node); 
3013 // Check to see if this FP immediate is already legal.
3014 // If this is a legal constant, turn it into a TargetConstantFP node.
3015 if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0)))
3016 Results.push_back(ExpandConstantFP(CFP, true));
3017 break;
3018 }
3019 case ISD::EHSELECTION: {
3020 unsigned Reg = TLI.getExceptionSelectorRegister();
3021 assert(Reg && "Can't expand to unknown register!");
3022 Results.push_back(DAG.getCopyFromReg(Node->getOperand(1), dl, Reg,
3023 Node->getValueType(0)));
3024 Results.push_back(Results[0].getValue(1));
3025 break;
3026 }
3027 case ISD::EXCEPTIONADDR: {
3028 unsigned Reg = TLI.getExceptionPointerRegister();
3029 assert(Reg && "Can't expand to unknown register!");
3030 Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, Reg,
3031 Node->getValueType(0)));
3032 Results.push_back(Results[0].getValue(1));
3033 break;
3034 }
3035 case ISD::SUB: {
3036 EVT VT = Node->getValueType(0);
3037 assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
3038 TLI.isOperationLegalOrCustom(ISD::XOR, VT) &&
3039 "Don't know how to expand this subtraction!");
3040 Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1),
3041 DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT));
3042 Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, VT));
3043 Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1));
3044 break;
3045 }
3046 case ISD::UREM:
3047 case ISD::SREM: {
3048 EVT VT = Node->getValueType(0);
3049 SDVTList VTs = DAG.getVTList(VT, VT);
3050 bool isSigned = Node->getOpcode() == ISD::SREM;
3051 unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
3052 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
3053 Tmp2 = Node->getOperand(0);
3054 Tmp3 = Node->getOperand(1);
3055 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
3056 (isDivRemLibcallAvailable(Node, isSigned, TLI) &&
3057 UseDivRem(Node, isSigned, false))) {
3058 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1);
3059 } else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) {
3060 // X % Y -> X - (X/Y)*Y
3061 Tmp1 = DAG.getNode(DivOpc, dl, VT, Tmp2, Tmp3);
3062 Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Tmp3);
3063 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp1);
3064 } else if (isSigned)
3065 Tmp1 = ExpandIntLibCall(Node, true,
3066 RTLIB::SREM_I8,
3067 RTLIB::SREM_I16, RTLIB::SREM_I32,
3068 RTLIB::SREM_I64, RTLIB::SREM_I128);
3069 else
3070 Tmp1 = ExpandIntLibCall(Node, false,
3071 RTLIB::UREM_I8,
3072 RTLIB::UREM_I16, RTLIB::UREM_I32,
3073 RTLIB::UREM_I64, RTLIB::UREM_I128);
3074 Results.push_back(Tmp1);
3075 break;
3076 }
3077 case ISD::UDIV:
3078 case ISD::SDIV: {
3079 bool isSigned = Node->getOpcode() == ISD::SDIV;
3080 unsigned DivRemOpc = isSigned ?
ISD::SDIVREM : ISD::UDIVREM; 3081 EVT VT = Node->getValueType(0); 3082 SDVTList VTs = DAG.getVTList(VT, VT); 3083 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) || 3084 (isDivRemLibcallAvailable(Node, isSigned, TLI) && 3085 UseDivRem(Node, isSigned, true))) 3086 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0), 3087 Node->getOperand(1)); 3088 else if (isSigned) 3089 Tmp1 = ExpandIntLibCall(Node, true, 3090 RTLIB::SDIV_I8, 3091 RTLIB::SDIV_I16, RTLIB::SDIV_I32, 3092 RTLIB::SDIV_I64, RTLIB::SDIV_I128); 3093 else 3094 Tmp1 = ExpandIntLibCall(Node, false, 3095 RTLIB::UDIV_I8, 3096 RTLIB::UDIV_I16, RTLIB::UDIV_I32, 3097 RTLIB::UDIV_I64, RTLIB::UDIV_I128); 3098 Results.push_back(Tmp1); 3099 break; 3100 } 3101 case ISD::MULHU: 3102 case ISD::MULHS: { 3103 unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI : 3104 ISD::SMUL_LOHI; 3105 EVT VT = Node->getValueType(0); 3106 SDVTList VTs = DAG.getVTList(VT, VT); 3107 assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) && 3108 "If this wasn't legal, it shouldn't have been created!"); 3109 Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0), 3110 Node->getOperand(1)); 3111 Results.push_back(Tmp1.getValue(1)); 3112 break; 3113 } 3114 case ISD::SDIVREM: 3115 case ISD::UDIVREM: 3116 // Expand into divrem libcall 3117 ExpandDivRemLibCall(Node, Results); 3118 break; 3119 case ISD::MUL: { 3120 EVT VT = Node->getValueType(0); 3121 SDVTList VTs = DAG.getVTList(VT, VT); 3122 // See if multiply or divide can be lowered using two-result operations. 3123 // We just need the low half of the multiply; try both the signed 3124 // and unsigned forms. If the target supports both SMUL_LOHI and 3125 // UMUL_LOHI, form a preference by checking which forms of plain 3126 // MULH it supports. 3127 bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT); 3128 bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT); 3129 bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT); 3130 bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT); 3131 unsigned OpToUse = 0; 3132 if (HasSMUL_LOHI && !HasMULHS) { 3133 OpToUse = ISD::SMUL_LOHI; 3134 } else if (HasUMUL_LOHI && !HasMULHU) { 3135 OpToUse = ISD::UMUL_LOHI; 3136 } else if (HasSMUL_LOHI) { 3137 OpToUse = ISD::SMUL_LOHI; 3138 } else if (HasUMUL_LOHI) { 3139 OpToUse = ISD::UMUL_LOHI; 3140 } 3141 if (OpToUse) { 3142 Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0), 3143 Node->getOperand(1))); 3144 break; 3145 } 3146 Tmp1 = ExpandIntLibCall(Node, false, 3147 RTLIB::MUL_I8, 3148 RTLIB::MUL_I16, RTLIB::MUL_I32, 3149 RTLIB::MUL_I64, RTLIB::MUL_I128); 3150 Results.push_back(Tmp1); 3151 break; 3152 } 3153 case ISD::SADDO: 3154 case ISD::SSUBO: { 3155 SDValue LHS = Node->getOperand(0); 3156 SDValue RHS = Node->getOperand(1); 3157 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ? 
3158 ISD::ADD : ISD::SUB, dl, LHS.getValueType(), 3159 LHS, RHS); 3160 Results.push_back(Sum); 3161 EVT OType = Node->getValueType(1); 3162 3163 SDValue Zero = DAG.getConstant(0, LHS.getValueType()); 3164 3165 // LHSSign -> LHS >= 0 3166 // RHSSign -> RHS >= 0 3167 // SumSign -> Sum >= 0 3168 // 3169 // Add: 3170 // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign) 3171 // Sub: 3172 // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign) 3173 // 3174 SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE); 3175 SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE); 3176 SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign, 3177 Node->getOpcode() == ISD::SADDO ? 3178 ISD::SETEQ : ISD::SETNE); 3179 3180 SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE); 3181 SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE); 3182 3183 SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE); 3184 Results.push_back(Cmp); 3185 break; 3186 } 3187 case ISD::UADDO: 3188 case ISD::USUBO: { 3189 SDValue LHS = Node->getOperand(0); 3190 SDValue RHS = Node->getOperand(1); 3191 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::UADDO ? 3192 ISD::ADD : ISD::SUB, dl, LHS.getValueType(), 3193 LHS, RHS); 3194 Results.push_back(Sum); 3195 Results.push_back(DAG.getSetCC(dl, Node->getValueType(1), Sum, LHS, 3196 Node->getOpcode () == ISD::UADDO ? 3197 ISD::SETULT : ISD::SETUGT)); 3198 break; 3199 } 3200 case ISD::UMULO: 3201 case ISD::SMULO: { 3202 EVT VT = Node->getValueType(0); 3203 EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2); 3204 SDValue LHS = Node->getOperand(0); 3205 SDValue RHS = Node->getOperand(1); 3206 SDValue BottomHalf; 3207 SDValue TopHalf; 3208 static const unsigned Ops[2][3] = 3209 { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND }, 3210 { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }}; 3211 bool isSigned = Node->getOpcode() == ISD::SMULO; 3212 if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) { 3213 BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 3214 TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS); 3215 } else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) { 3216 BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS, 3217 RHS); 3218 TopHalf = BottomHalf.getValue(1); 3219 } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(), 3220 VT.getSizeInBits() * 2))) { 3221 LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS); 3222 RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS); 3223 Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS); 3224 BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1, 3225 DAG.getIntPtrConstant(0)); 3226 TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1, 3227 DAG.getIntPtrConstant(1)); 3228 } else { 3229 // We can fall back to a libcall with an illegal type for the MUL if we 3230 // have a libcall big enough. 3231 // Also, we can fall back to a division in some cases, but that's a big 3232 // performance hit in the general case. 3233 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 3234 if (WideVT == MVT::i16) 3235 LC = RTLIB::MUL_I16; 3236 else if (WideVT == MVT::i32) 3237 LC = RTLIB::MUL_I32; 3238 else if (WideVT == MVT::i64) 3239 LC = RTLIB::MUL_I64; 3240 else if (WideVT == MVT::i128) 3241 LC = RTLIB::MUL_I128; 3242 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!"); 3243 3244 // The high part is obtained by SRA'ing all but one of the bits of low 3245 // part. 
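// For the signed case, shifting a value right arithmetically by
// (width - 1) replicates its sign bit across the whole word, which is
// exactly the high half of that value's sign-extension to WideVT.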
3246 unsigned LoSize = VT.getSizeInBits();
3247 SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS,
3248 DAG.getConstant(LoSize-1, TLI.getPointerTy()));
3249 SDValue HiRHS = DAG.getNode(ISD::SRA, dl, VT, RHS,
3250 DAG.getConstant(LoSize-1, TLI.getPointerTy()));
3251
3252 // Here we're passing the 2 arguments explicitly as 4 arguments that are
3253 // pre-lowered to the correct types. This all depends upon WideVT not
3254 // being a legal type for the architecture and thus has to be split to
3255 // two arguments.
3256 SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
3257 SDValue Ret = ExpandLibCall(LC, WideVT, Args, 4, isSigned, dl);
3258 BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
3259 DAG.getIntPtrConstant(0));
3260 TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
3261 DAG.getIntPtrConstant(1));
3262 // Ret is a node with an illegal type. Because such things are not
3263 // generally permitted during this phase of legalization, delete the
3264 // node. The above EXTRACT_ELEMENT nodes should have been folded.
3265 DAG.DeleteNode(Ret.getNode());
3266 }
3267
3268 if (isSigned) {
3269 Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1,
3270 TLI.getShiftAmountTy(BottomHalf.getValueType()));
3271 Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1);
3272 TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1,
3273 ISD::SETNE);
3274 } else {
3275 TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf,
3276 DAG.getConstant(0, VT), ISD::SETNE);
3277 }
3278 Results.push_back(BottomHalf);
3279 Results.push_back(TopHalf);
3280 break;
3281 }
3282 case ISD::BUILD_PAIR: {
3283 EVT PairTy = Node->getValueType(0);
3284 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0));
3285 Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1));
3286 Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2,
3287 DAG.getConstant(PairTy.getSizeInBits()/2,
3288 TLI.getShiftAmountTy(PairTy)));
3289 Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2));
3290 break;
3291 }
3292 case ISD::SELECT:
3293 Tmp1 = Node->getOperand(0);
3294 Tmp2 = Node->getOperand(1);
3295 Tmp3 = Node->getOperand(2);
3296 if (Tmp1.getOpcode() == ISD::SETCC) {
3297 Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1),
3298 Tmp2, Tmp3,
3299 cast<CondCodeSDNode>(Tmp1.getOperand(2))->get());
3300 } else {
3301 Tmp1 = DAG.getSelectCC(dl, Tmp1,
3302 DAG.getConstant(0, Tmp1.getValueType()),
3303 Tmp2, Tmp3, ISD::SETNE);
3304 }
3305 Results.push_back(Tmp1);
3306 break;
3307 case ISD::BR_JT: {
3308 SDValue Chain = Node->getOperand(0);
3309 SDValue Table = Node->getOperand(1);
3310 SDValue Index = Node->getOperand(2);
3311
3312 EVT PTy = TLI.getPointerTy();
3313
3314 const TargetData &TD = *TLI.getTargetData();
3315 unsigned EntrySize =
3316 DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);
3317
3318 Index = DAG.getNode(ISD::MUL, dl, PTy,
3319 Index, DAG.getConstant(EntrySize, PTy));
3320 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
3321
3322 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
3323 SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr,
3324 MachinePointerInfo::getJumpTable(), MemVT,
3325 false, false, 0);
3326 Addr = LD;
3327 if (TM.getRelocationModel() == Reloc::PIC_) {
3328 // For PIC, the sequence is:
3329 // BRIND(load(Jumptable + index) + RelocBase)
3330 // RelocBase can be JumpTable, GOT or some sort of global base.
3331 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, 3332 TLI.getPICJumpTableRelocBase(Table, DAG)); 3333 } 3334 Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr); 3335 Results.push_back(Tmp1); 3336 break; 3337 } 3338 case ISD::BRCOND: 3339 // Expand brcond's setcc into its constituent parts and create a BR_CC 3340 // Node. 3341 Tmp1 = Node->getOperand(0); 3342 Tmp2 = Node->getOperand(1); 3343 if (Tmp2.getOpcode() == ISD::SETCC) { 3344 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, 3345 Tmp1, Tmp2.getOperand(2), 3346 Tmp2.getOperand(0), Tmp2.getOperand(1), 3347 Node->getOperand(2)); 3348 } else { 3349 // We test only the i1 bit. Skip the AND if UNDEF. 3350 Tmp3 = (Tmp2.getOpcode() == ISD::UNDEF) ? Tmp2 : 3351 DAG.getNode(ISD::AND, dl, Tmp2.getValueType(), Tmp2, 3352 DAG.getConstant(1, Tmp2.getValueType())); 3353 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1, 3354 DAG.getCondCode(ISD::SETNE), Tmp3, 3355 DAG.getConstant(0, Tmp3.getValueType()), 3356 Node->getOperand(2)); 3357 } 3358 Results.push_back(Tmp1); 3359 break; 3360 case ISD::SETCC: { 3361 Tmp1 = Node->getOperand(0); 3362 Tmp2 = Node->getOperand(1); 3363 Tmp3 = Node->getOperand(2); 3364 LegalizeSetCCCondCode(Node->getValueType(0), Tmp1, Tmp2, Tmp3, dl); 3365 3366 // If we expanded the SETCC into an AND/OR, return the new node 3367 if (Tmp2.getNode() == 0) { 3368 Results.push_back(Tmp1); 3369 break; 3370 } 3371 3372 // Otherwise, SETCC for the given comparison type must be completely 3373 // illegal; expand it into a SELECT_CC. 3374 EVT VT = Node->getValueType(0); 3375 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2, 3376 DAG.getConstant(1, VT), DAG.getConstant(0, VT), Tmp3); 3377 Results.push_back(Tmp1); 3378 break; 3379 } 3380 case ISD::SELECT_CC: { 3381 Tmp1 = Node->getOperand(0); // LHS 3382 Tmp2 = Node->getOperand(1); // RHS 3383 Tmp3 = Node->getOperand(2); // True 3384 Tmp4 = Node->getOperand(3); // False 3385 SDValue CC = Node->getOperand(4); 3386 3387 LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp1.getValueType()), 3388 Tmp1, Tmp2, CC, dl); 3389 3390 assert(!Tmp2.getNode() && "Can't legalize SELECT_CC with legal condition!"); 3391 Tmp2 = DAG.getConstant(0, Tmp1.getValueType()); 3392 CC = DAG.getCondCode(ISD::SETNE); 3393 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, Tmp2, 3394 Tmp3, Tmp4, CC); 3395 Results.push_back(Tmp1); 3396 break; 3397 } 3398 case ISD::BR_CC: { 3399 Tmp1 = Node->getOperand(0); // Chain 3400 Tmp2 = Node->getOperand(2); // LHS 3401 Tmp3 = Node->getOperand(3); // RHS 3402 Tmp4 = Node->getOperand(1); // CC 3403 3404 LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp2.getValueType()), 3405 Tmp2, Tmp3, Tmp4, dl); 3406 3407 assert(!Tmp3.getNode() && "Can't legalize BR_CC with legal condition!"); 3408 Tmp3 = DAG.getConstant(0, Tmp2.getValueType()); 3409 Tmp4 = DAG.getCondCode(ISD::SETNE); 3410 Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4, Tmp2, 3411 Tmp3, Node->getOperand(4)); 3412 Results.push_back(Tmp1); 3413 break; 3414 } 3415 case ISD::BUILD_VECTOR: 3416 Results.push_back(ExpandBUILD_VECTOR(Node)); 3417 break; 3418 case ISD::SRA: 3419 case ISD::SRL: 3420 case ISD::SHL: { 3421 // Scalarize vector SRA/SRL/SHL. 
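// Each lane of the value vector and of the shift-amount vector is
// extracted individually, the now-legal scalar shift is applied, and the
// per-lane results are reassembled with a BUILD_VECTOR of the original type.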
3422 EVT VT = Node->getValueType(0); 3423 assert(VT.isVector() && "Unable to legalize non-vector shift"); 3424 assert(TLI.isTypeLegal(VT.getScalarType())&& "Element type must be legal"); 3425 unsigned NumElem = VT.getVectorNumElements(); 3426 3427 SmallVector<SDValue, 8> Scalars; 3428 for (unsigned Idx = 0; Idx < NumElem; Idx++) { 3429 SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 3430 VT.getScalarType(), 3431 Node->getOperand(0), DAG.getIntPtrConstant(Idx)); 3432 SDValue Sh = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 3433 VT.getScalarType(), 3434 Node->getOperand(1), DAG.getIntPtrConstant(Idx)); 3435 Scalars.push_back(DAG.getNode(Node->getOpcode(), dl, 3436 VT.getScalarType(), Ex, Sh)); 3437 } 3438 SDValue Result = 3439 DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), 3440 &Scalars[0], Scalars.size()); 3441 ReplaceNode(SDValue(Node, 0), Result); 3442 break; 3443 } 3444 case ISD::GLOBAL_OFFSET_TABLE: 3445 case ISD::GlobalAddress: 3446 case ISD::GlobalTLSAddress: 3447 case ISD::ExternalSymbol: 3448 case ISD::ConstantPool: 3449 case ISD::JumpTable: 3450 case ISD::INTRINSIC_W_CHAIN: 3451 case ISD::INTRINSIC_WO_CHAIN: 3452 case ISD::INTRINSIC_VOID: 3453 // FIXME: Custom lowering for these operations shouldn't return null! 3454 break; 3455 } 3456 3457 // Replace the original node with the legalized result. 3458 if (!Results.empty()) 3459 ReplaceNode(Node, Results.data()); 3460} 3461 3462void SelectionDAGLegalize::PromoteNode(SDNode *Node) { 3463 SmallVector<SDValue, 8> Results; 3464 EVT OVT = Node->getValueType(0); 3465 if (Node->getOpcode() == ISD::UINT_TO_FP || 3466 Node->getOpcode() == ISD::SINT_TO_FP || 3467 Node->getOpcode() == ISD::SETCC) { 3468 OVT = Node->getOperand(0).getValueType(); 3469 } 3470 EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT); 3471 DebugLoc dl = Node->getDebugLoc(); 3472 SDValue Tmp1, Tmp2, Tmp3; 3473 switch (Node->getOpcode()) { 3474 case ISD::CTTZ: 3475 case ISD::CTTZ_ZERO_UNDEF: 3476 case ISD::CTLZ: 3477 case ISD::CTLZ_ZERO_UNDEF: 3478 case ISD::CTPOP: 3479 // Zero extend the argument. 3480 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0)); 3481 // Perform the larger operation. For CTPOP and CTTZ_ZERO_UNDEF, this is 3482 // already the correct result. 3483 Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1); 3484 if (Node->getOpcode() == ISD::CTTZ) { 3485 // FIXME: This should set a bit in the zero extended value instead. 
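// On the promoted type, CTTZ of a zero input returns NVT's bit width;
// detect that case and substitute the original type's bit width so the
// truncated result stays correct.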
3486 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT), 3487 Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT), 3488 ISD::SETEQ); 3489 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, 3490 DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1); 3491 } else if (Node->getOpcode() == ISD::CTLZ || 3492 Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF) { 3493 // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT)) 3494 Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1, 3495 DAG.getConstant(NVT.getSizeInBits() - 3496 OVT.getSizeInBits(), NVT)); 3497 } 3498 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1)); 3499 break; 3500 case ISD::BSWAP: { 3501 unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits(); 3502 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0)); 3503 Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1); 3504 Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1, 3505 DAG.getConstant(DiffBits, TLI.getShiftAmountTy(NVT))); 3506 Results.push_back(Tmp1); 3507 break; 3508 } 3509 case ISD::FP_TO_UINT: 3510 case ISD::FP_TO_SINT: 3511 Tmp1 = PromoteLegalFP_TO_INT(Node->getOperand(0), Node->getValueType(0), 3512 Node->getOpcode() == ISD::FP_TO_SINT, dl); 3513 Results.push_back(Tmp1); 3514 break; 3515 case ISD::UINT_TO_FP: 3516 case ISD::SINT_TO_FP: 3517 Tmp1 = PromoteLegalINT_TO_FP(Node->getOperand(0), Node->getValueType(0), 3518 Node->getOpcode() == ISD::SINT_TO_FP, dl); 3519 Results.push_back(Tmp1); 3520 break; 3521 case ISD::AND: 3522 case ISD::OR: 3523 case ISD::XOR: { 3524 unsigned ExtOp, TruncOp; 3525 if (OVT.isVector()) { 3526 ExtOp = ISD::BITCAST; 3527 TruncOp = ISD::BITCAST; 3528 } else { 3529 assert(OVT.isInteger() && "Cannot promote logic operation"); 3530 ExtOp = ISD::ANY_EXTEND; 3531 TruncOp = ISD::TRUNCATE; 3532 } 3533 // Promote each of the values to the new type. 3534 Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0)); 3535 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1)); 3536 // Perform the larger operation, then convert back 3537 Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2); 3538 Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1)); 3539 break; 3540 } 3541 case ISD::SELECT: { 3542 unsigned ExtOp, TruncOp; 3543 if (Node->getValueType(0).isVector()) { 3544 ExtOp = ISD::BITCAST; 3545 TruncOp = ISD::BITCAST; 3546 } else if (Node->getValueType(0).isInteger()) { 3547 ExtOp = ISD::ANY_EXTEND; 3548 TruncOp = ISD::TRUNCATE; 3549 } else { 3550 ExtOp = ISD::FP_EXTEND; 3551 TruncOp = ISD::FP_ROUND; 3552 } 3553 Tmp1 = Node->getOperand(0); 3554 // Promote each of the values to the new type. 3555 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1)); 3556 Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2)); 3557 // Perform the larger operation, then round down. 3558 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp1, Tmp2, Tmp3); 3559 if (TruncOp != ISD::FP_ROUND) 3560 Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1); 3561 else 3562 Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1, 3563 DAG.getIntPtrConstant(0)); 3564 Results.push_back(Tmp1); 3565 break; 3566 } 3567 case ISD::VECTOR_SHUFFLE: { 3568 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask(); 3569 3570 // Cast the two input vectors. 3571 Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0)); 3572 Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1)); 3573 3574 // Convert the shuffle mask to the right # elements. 
3575 Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask); 3576 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1); 3577 Results.push_back(Tmp1); 3578 break; 3579 } 3580 case ISD::SETCC: { 3581 unsigned ExtOp = ISD::FP_EXTEND; 3582 if (NVT.isInteger()) { 3583 ISD::CondCode CCCode = 3584 cast<CondCodeSDNode>(Node->getOperand(2))->get(); 3585 ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 3586 } 3587 Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0)); 3588 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1)); 3589 Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0), 3590 Tmp1, Tmp2, Node->getOperand(2))); 3591 break; 3592 } 3593 case ISD::FPOW: { 3594 Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0)); 3595 Tmp2 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(1)); 3596 Tmp3 = DAG.getNode(ISD::FPOW, dl, NVT, Tmp1, Tmp2); 3597 Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT, 3598 Tmp3, DAG.getIntPtrConstant(0))); 3599 break; 3600 } 3601 case ISD::FLOG2: 3602 case ISD::FEXP2: 3603 case ISD::FLOG: 3604 case ISD::FEXP: { 3605 Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0)); 3606 Tmp2 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1); 3607 Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT, 3608 Tmp2, DAG.getIntPtrConstant(0))); 3609 break; 3610 } 3611 } 3612 3613 // Replace the original node with the legalized result. 3614 if (!Results.empty()) 3615 ReplaceNode(Node, Results.data()); 3616} 3617 3618// SelectionDAG::Legalize - This is the entry point for the file. 3619// 3620void SelectionDAG::Legalize() { 3621 /// run - This is the main entry point to this class. 3622 /// 3623 SelectionDAGLegalize(*this).LegalizeDAG(); 3624} 3625