LegalizeDAG.cpp revision d36696c4e0ccd10a91bad2e3383c50347e2ea5ec
1//===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the SelectionDAG::Legalize method. 11// 12//===----------------------------------------------------------------------===// 13 14#include "llvm/CallingConv.h" 15#include "llvm/Constants.h" 16#include "llvm/DebugInfo.h" 17#include "llvm/DerivedTypes.h" 18#include "llvm/LLVMContext.h" 19#include "llvm/CodeGen/Analysis.h" 20#include "llvm/CodeGen/MachineFunction.h" 21#include "llvm/CodeGen/MachineJumpTableInfo.h" 22#include "llvm/CodeGen/SelectionDAG.h" 23#include "llvm/Target/TargetFrameLowering.h" 24#include "llvm/Target/TargetLowering.h" 25#include "llvm/DataLayout.h" 26#include "llvm/Target/TargetMachine.h" 27#include "llvm/Support/Debug.h" 28#include "llvm/Support/ErrorHandling.h" 29#include "llvm/Support/MathExtras.h" 30#include "llvm/Support/raw_ostream.h" 31#include "llvm/ADT/DenseMap.h" 32#include "llvm/ADT/SmallVector.h" 33#include "llvm/ADT/SmallPtrSet.h" 34using namespace llvm; 35 36//===----------------------------------------------------------------------===// 37/// SelectionDAGLegalize - This takes an arbitrary SelectionDAG as input and 38/// hacks on it until the target machine can handle it. This involves 39/// eliminating value sizes the machine cannot handle (promoting small sizes to 40/// large sizes or splitting up large values into small values) as well as 41/// eliminating operations the machine cannot handle. 42/// 43/// This code also does a small amount of optimization and recognition of idioms 44/// as part of its processing. For example, if a target does not support a 45/// 'setcc' instruction efficiently, but does support 'brcc' instruction, this 46/// will attempt merge setcc and brc instructions into brcc's. 47/// 48namespace { 49class SelectionDAGLegalize : public SelectionDAG::DAGUpdateListener { 50 const TargetMachine &TM; 51 const TargetLowering &TLI; 52 SelectionDAG &DAG; 53 54 /// LegalizePosition - The iterator for walking through the node list. 55 SelectionDAG::allnodes_iterator LegalizePosition; 56 57 /// LegalizedNodes - The set of nodes which have already been legalized. 58 SmallPtrSet<SDNode *, 16> LegalizedNodes; 59 60 // Libcall insertion helpers. 61 62public: 63 explicit SelectionDAGLegalize(SelectionDAG &DAG); 64 65 void LegalizeDAG(); 66 67private: 68 /// LegalizeOp - Legalizes the given operation. 69 void LegalizeOp(SDNode *Node); 70 71 SDValue OptimizeFloatStore(StoreSDNode *ST); 72 73 void LegalizeLoadOps(SDNode *Node); 74 void LegalizeStoreOps(SDNode *Node); 75 76 /// PerformInsertVectorEltInMemory - Some target cannot handle a variable 77 /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it 78 /// is necessary to spill the vector being inserted into to memory, perform 79 /// the insert there, and then read the result back. 80 SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, 81 SDValue Idx, DebugLoc dl); 82 SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, 83 SDValue Idx, DebugLoc dl); 84 85 /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which 86 /// performs the same shuffe in terms of order or result bytes, but on a type 87 /// whose vector element type is narrower than the original shuffle type. 88 /// e.g. 
<v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3> 89 SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl, 90 SDValue N1, SDValue N2, 91 ArrayRef<int> Mask) const; 92 93 void LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, 94 DebugLoc dl); 95 96 SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned); 97 SDValue ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, const SDValue *Ops, 98 unsigned NumOps, bool isSigned, DebugLoc dl); 99 100 std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC, 101 SDNode *Node, bool isSigned); 102 SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32, 103 RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80, 104 RTLIB::Libcall Call_PPCF128); 105 SDValue ExpandIntLibCall(SDNode *Node, bool isSigned, 106 RTLIB::Libcall Call_I8, 107 RTLIB::Libcall Call_I16, 108 RTLIB::Libcall Call_I32, 109 RTLIB::Libcall Call_I64, 110 RTLIB::Libcall Call_I128); 111 void ExpandDivRemLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results); 112 113 SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, DebugLoc dl); 114 SDValue ExpandBUILD_VECTOR(SDNode *Node); 115 SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node); 116 void ExpandDYNAMIC_STACKALLOC(SDNode *Node, 117 SmallVectorImpl<SDValue> &Results); 118 SDValue ExpandFCOPYSIGN(SDNode *Node); 119 SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT, 120 DebugLoc dl); 121 SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned, 122 DebugLoc dl); 123 SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned, 124 DebugLoc dl); 125 126 SDValue ExpandBSWAP(SDValue Op, DebugLoc dl); 127 SDValue ExpandBitCount(unsigned Opc, SDValue Op, DebugLoc dl); 128 129 SDValue ExpandExtractFromVectorThroughStack(SDValue Op); 130 SDValue ExpandInsertToVectorThroughStack(SDValue Op); 131 SDValue ExpandVectorBuildThroughStack(SDNode* Node); 132 133 SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP); 134 135 std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node); 136 137 void ExpandNode(SDNode *Node); 138 void PromoteNode(SDNode *Node); 139 140 void ForgetNode(SDNode *N) { 141 LegalizedNodes.erase(N); 142 if (LegalizePosition == SelectionDAG::allnodes_iterator(N)) 143 ++LegalizePosition; 144 } 145 146public: 147 // DAGUpdateListener implementation. 148 virtual void NodeDeleted(SDNode *N, SDNode *E) { 149 ForgetNode(N); 150 } 151 virtual void NodeUpdated(SDNode *N) {} 152 153 // Node replacement helpers 154 void ReplacedNode(SDNode *N) { 155 if (N->use_empty()) { 156 DAG.RemoveDeadNode(N); 157 } else { 158 ForgetNode(N); 159 } 160 } 161 void ReplaceNode(SDNode *Old, SDNode *New) { 162 DAG.ReplaceAllUsesWith(Old, New); 163 ReplacedNode(Old); 164 } 165 void ReplaceNode(SDValue Old, SDValue New) { 166 DAG.ReplaceAllUsesWith(Old, New); 167 ReplacedNode(Old.getNode()); 168 } 169 void ReplaceNode(SDNode *Old, const SDValue *New) { 170 DAG.ReplaceAllUsesWith(Old, New); 171 ReplacedNode(Old); 172 } 173}; 174} 175 176/// ShuffleWithNarrowerEltType - Return a vector shuffle operation which 177/// performs the same shuffe in terms of order or result bytes, but on a type 178/// whose vector element type is narrower than the original shuffle type. 179/// e.g. 
<v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3> 180SDValue 181SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl, 182 SDValue N1, SDValue N2, 183 ArrayRef<int> Mask) const { 184 unsigned NumMaskElts = VT.getVectorNumElements(); 185 unsigned NumDestElts = NVT.getVectorNumElements(); 186 unsigned NumEltsGrowth = NumDestElts / NumMaskElts; 187 188 assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!"); 189 190 if (NumEltsGrowth == 1) 191 return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]); 192 193 SmallVector<int, 8> NewMask; 194 for (unsigned i = 0; i != NumMaskElts; ++i) { 195 int Idx = Mask[i]; 196 for (unsigned j = 0; j != NumEltsGrowth; ++j) { 197 if (Idx < 0) 198 NewMask.push_back(-1); 199 else 200 NewMask.push_back(Idx * NumEltsGrowth + j); 201 } 202 } 203 assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?"); 204 assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?"); 205 return DAG.getVectorShuffle(NVT, dl, N1, N2, &NewMask[0]); 206} 207 208SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag) 209 : SelectionDAG::DAGUpdateListener(dag), 210 TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()), 211 DAG(dag) { 212} 213 214void SelectionDAGLegalize::LegalizeDAG() { 215 DAG.AssignTopologicalOrder(); 216 217 // Visit all the nodes. We start in topological order, so that we see 218 // nodes with their original operands intact. Legalization can produce 219 // new nodes which may themselves need to be legalized. Iterate until all 220 // nodes have been legalized. 221 for (;;) { 222 bool AnyLegalized = false; 223 for (LegalizePosition = DAG.allnodes_end(); 224 LegalizePosition != DAG.allnodes_begin(); ) { 225 --LegalizePosition; 226 227 SDNode *N = LegalizePosition; 228 if (LegalizedNodes.insert(N)) { 229 AnyLegalized = true; 230 LegalizeOp(N); 231 } 232 } 233 if (!AnyLegalized) 234 break; 235 236 } 237 238 // Remove dead nodes now. 239 DAG.RemoveDeadNodes(); 240} 241 242/// ExpandConstantFP - Expands the ConstantFP node to an integer constant or 243/// a load from the constant pool. 244SDValue 245SelectionDAGLegalize::ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP) { 246 bool Extend = false; 247 DebugLoc dl = CFP->getDebugLoc(); 248 249 // If a FP immediate is precise when represented as a float and if the 250 // target can do an extending load from float to double, we put it into 251 // the constant pool as a float, even if it's is statically typed as a 252 // double. This shrinks FP constants and canonicalizes them for targets where 253 // an FP extending load is the same cost as a normal load (such as on the x87 254 // fp stack or PPC FP unit). 255 EVT VT = CFP->getValueType(0); 256 ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue()); 257 if (!UseCP) { 258 assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion"); 259 return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(), 260 (VT == MVT::f64) ? MVT::i64 : MVT::i32); 261 } 262 263 EVT OrigVT = VT; 264 EVT SVT = VT; 265 while (SVT != MVT::f32) { 266 SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1); 267 if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) && 268 // Only do this if the target has a native EXTLOAD instruction from 269 // smaller type. 
270 TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) && 271 TLI.ShouldShrinkFPConstant(OrigVT)) { 272 Type *SType = SVT.getTypeForEVT(*DAG.getContext()); 273 LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType)); 274 VT = SVT; 275 Extend = true; 276 } 277 } 278 279 SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy()); 280 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 281 if (Extend) { 282 SDValue Result = 283 DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT, 284 DAG.getEntryNode(), 285 CPIdx, MachinePointerInfo::getConstantPool(), 286 VT, false, false, Alignment); 287 return Result; 288 } 289 SDValue Result = 290 DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx, 291 MachinePointerInfo::getConstantPool(), false, false, false, 292 Alignment); 293 return Result; 294} 295 296/// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores. 297static void ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, 298 const TargetLowering &TLI, 299 SelectionDAGLegalize *DAGLegalize) { 300 assert(ST->getAddressingMode() == ISD::UNINDEXED && 301 "unaligned indexed stores not implemented!"); 302 SDValue Chain = ST->getChain(); 303 SDValue Ptr = ST->getBasePtr(); 304 SDValue Val = ST->getValue(); 305 EVT VT = Val.getValueType(); 306 int Alignment = ST->getAlignment(); 307 DebugLoc dl = ST->getDebugLoc(); 308 if (ST->getMemoryVT().isFloatingPoint() || 309 ST->getMemoryVT().isVector()) { 310 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 311 if (TLI.isTypeLegal(intVT)) { 312 // Expand to a bitconvert of the value to the integer type of the 313 // same size, then a (misaligned) int store. 314 // FIXME: Does not handle truncating floating point stores! 315 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val); 316 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(), 317 ST->isVolatile(), ST->isNonTemporal(), Alignment); 318 DAGLegalize->ReplaceNode(SDValue(ST, 0), Result); 319 return; 320 } 321 // Do a (aligned) store to a stack slot, then copy from the stack slot 322 // to the final destination using (unaligned) integer loads and stores. 323 EVT StoredVT = ST->getMemoryVT(); 324 EVT RegVT = 325 TLI.getRegisterType(*DAG.getContext(), 326 EVT::getIntegerVT(*DAG.getContext(), 327 StoredVT.getSizeInBits())); 328 unsigned StoredBytes = StoredVT.getSizeInBits() / 8; 329 unsigned RegBytes = RegVT.getSizeInBits() / 8; 330 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes; 331 332 // Make sure the stack slot is also aligned for the register type. 333 SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT); 334 335 // Perform the original store, only redirected to the stack slot. 336 SDValue Store = DAG.getTruncStore(Chain, dl, 337 Val, StackPtr, MachinePointerInfo(), 338 StoredVT, false, false, 0); 339 SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy()); 340 SmallVector<SDValue, 8> Stores; 341 unsigned Offset = 0; 342 343 // Do all but one copies using the full register width. 344 for (unsigned i = 1; i < NumRegs; i++) { 345 // Load one integer register's worth from the stack slot. 346 SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr, 347 MachinePointerInfo(), 348 false, false, false, 0); 349 // Store it to the final location. Remember the store. 350 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr, 351 ST->getPointerInfo().getWithOffset(Offset), 352 ST->isVolatile(), ST->isNonTemporal(), 353 MinAlign(ST->getAlignment(), Offset))); 354 // Increment the pointers. 
355 Offset += RegBytes; 356 StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr, 357 Increment); 358 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 359 } 360 361 // The last store may be partial. Do a truncating store. On big-endian 362 // machines this requires an extending load from the stack slot to ensure 363 // that the bits are in the right place. 364 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 365 8 * (StoredBytes - Offset)); 366 367 // Load from the stack slot. 368 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr, 369 MachinePointerInfo(), 370 MemVT, false, false, 0); 371 372 Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr, 373 ST->getPointerInfo() 374 .getWithOffset(Offset), 375 MemVT, ST->isVolatile(), 376 ST->isNonTemporal(), 377 MinAlign(ST->getAlignment(), Offset))); 378 // The order of the stores doesn't matter - say it with a TokenFactor. 379 SDValue Result = 380 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0], 381 Stores.size()); 382 DAGLegalize->ReplaceNode(SDValue(ST, 0), Result); 383 return; 384 } 385 assert(ST->getMemoryVT().isInteger() && 386 !ST->getMemoryVT().isVector() && 387 "Unaligned store of unknown type."); 388 // Get the half-size VT 389 EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext()); 390 int NumBits = NewStoredVT.getSizeInBits(); 391 int IncrementSize = NumBits / 8; 392 393 // Divide the stored value in two parts. 394 SDValue ShiftAmount = DAG.getConstant(NumBits, 395 TLI.getShiftAmountTy(Val.getValueType())); 396 SDValue Lo = Val; 397 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount); 398 399 // Store the two parts 400 SDValue Store1, Store2; 401 Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr, 402 ST->getPointerInfo(), NewStoredVT, 403 ST->isVolatile(), ST->isNonTemporal(), Alignment); 404 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 405 DAG.getConstant(IncrementSize, TLI.getPointerTy())); 406 Alignment = MinAlign(Alignment, IncrementSize); 407 Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr, 408 ST->getPointerInfo().getWithOffset(IncrementSize), 409 NewStoredVT, ST->isVolatile(), ST->isNonTemporal(), 410 Alignment); 411 412 SDValue Result = 413 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2); 414 DAGLegalize->ReplaceNode(SDValue(ST, 0), Result); 415} 416 417/// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads. 418static void 419ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, 420 const TargetLowering &TLI, 421 SDValue &ValResult, SDValue &ChainResult) { 422 assert(LD->getAddressingMode() == ISD::UNINDEXED && 423 "unaligned indexed loads not implemented!"); 424 SDValue Chain = LD->getChain(); 425 SDValue Ptr = LD->getBasePtr(); 426 EVT VT = LD->getValueType(0); 427 EVT LoadedVT = LD->getMemoryVT(); 428 DebugLoc dl = LD->getDebugLoc(); 429 if (VT.isFloatingPoint() || VT.isVector()) { 430 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits()); 431 if (TLI.isTypeLegal(intVT) && TLI.isTypeLegal(LoadedVT)) { 432 // Expand to a (misaligned) integer load of the same size, 433 // then bitconvert to floating point or vector. 
434 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(), 435 LD->isVolatile(), 436 LD->isNonTemporal(), 437 LD->isInvariant(), LD->getAlignment()); 438 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad); 439 if (LoadedVT != VT) 440 Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND : 441 ISD::ANY_EXTEND, dl, VT, Result); 442 443 ValResult = Result; 444 ChainResult = Chain; 445 return; 446 } 447 448 // Copy the value to a (aligned) stack slot using (unaligned) integer 449 // loads and stores, then do a (aligned) load from the stack slot. 450 EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT); 451 unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8; 452 unsigned RegBytes = RegVT.getSizeInBits() / 8; 453 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes; 454 455 // Make sure the stack slot is also aligned for the register type. 456 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT); 457 458 SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy()); 459 SmallVector<SDValue, 8> Stores; 460 SDValue StackPtr = StackBase; 461 unsigned Offset = 0; 462 463 // Do all but one copies using the full register width. 464 for (unsigned i = 1; i < NumRegs; i++) { 465 // Load one integer register's worth from the original location. 466 SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr, 467 LD->getPointerInfo().getWithOffset(Offset), 468 LD->isVolatile(), LD->isNonTemporal(), 469 LD->isInvariant(), 470 MinAlign(LD->getAlignment(), Offset)); 471 // Follow the load with a store to the stack slot. Remember the store. 472 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr, 473 MachinePointerInfo(), false, false, 0)); 474 // Increment the pointers. 475 Offset += RegBytes; 476 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 477 StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr, 478 Increment); 479 } 480 481 // The last copy may be partial. Do an extending load. 482 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 483 8 * (LoadedBytes - Offset)); 484 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr, 485 LD->getPointerInfo().getWithOffset(Offset), 486 MemVT, LD->isVolatile(), 487 LD->isNonTemporal(), 488 MinAlign(LD->getAlignment(), Offset)); 489 // Follow the load with a store to the stack slot. Remember the store. 490 // On big-endian machines this requires a truncating store to ensure 491 // that the bits end up in the right place. 492 Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr, 493 MachinePointerInfo(), MemVT, 494 false, false, 0)); 495 496 // The order of the stores doesn't matter - say it with a TokenFactor. 497 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0], 498 Stores.size()); 499 500 // Finally, perform the original load only redirected to the stack slot. 501 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase, 502 MachinePointerInfo(), LoadedVT, false, false, 0); 503 504 // Callers expect a MERGE_VALUES node. 505 ValResult = Load; 506 ChainResult = TF; 507 return; 508 } 509 assert(LoadedVT.isInteger() && !LoadedVT.isVector() && 510 "Unaligned load of unsupported type."); 511 512 // Compute the new VT that is half the size of the old one. This is an 513 // integer MVT. 
514 unsigned NumBits = LoadedVT.getSizeInBits(); 515 EVT NewLoadedVT; 516 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2); 517 NumBits >>= 1; 518 519 unsigned Alignment = LD->getAlignment(); 520 unsigned IncrementSize = NumBits / 8; 521 ISD::LoadExtType HiExtType = LD->getExtensionType(); 522 523 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD. 524 if (HiExtType == ISD::NON_EXTLOAD) 525 HiExtType = ISD::ZEXTLOAD; 526 527 // Load the value in two parts 528 SDValue Lo, Hi; 529 if (TLI.isLittleEndian()) { 530 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(), 531 NewLoadedVT, LD->isVolatile(), 532 LD->isNonTemporal(), Alignment); 533 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 534 DAG.getConstant(IncrementSize, TLI.getPointerTy())); 535 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, 536 LD->getPointerInfo().getWithOffset(IncrementSize), 537 NewLoadedVT, LD->isVolatile(), 538 LD->isNonTemporal(), MinAlign(Alignment,IncrementSize)); 539 } else { 540 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(), 541 NewLoadedVT, LD->isVolatile(), 542 LD->isNonTemporal(), Alignment); 543 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 544 DAG.getConstant(IncrementSize, TLI.getPointerTy())); 545 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, 546 LD->getPointerInfo().getWithOffset(IncrementSize), 547 NewLoadedVT, LD->isVolatile(), 548 LD->isNonTemporal(), MinAlign(Alignment,IncrementSize)); 549 } 550 551 // aggregate the two parts 552 SDValue ShiftAmount = DAG.getConstant(NumBits, 553 TLI.getShiftAmountTy(Hi.getValueType())); 554 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount); 555 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo); 556 557 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 558 Hi.getValue(1)); 559 560 ValResult = Result; 561 ChainResult = TF; 562} 563 564/// PerformInsertVectorEltInMemory - Some target cannot handle a variable 565/// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it 566/// is necessary to spill the vector being inserted into to memory, perform 567/// the insert there, and then read the result back. 568SDValue SelectionDAGLegalize:: 569PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx, 570 DebugLoc dl) { 571 SDValue Tmp1 = Vec; 572 SDValue Tmp2 = Val; 573 SDValue Tmp3 = Idx; 574 575 // If the target doesn't support this, we have to spill the input vector 576 // to a temporary stack slot, update the element, then reload it. This is 577 // badness. We could also load the value into a vector register (either 578 // with a "move to register" or "extload into register" instruction, then 579 // permute it into place, if the idx is a constant and if the idx is 580 // supported by the target. 581 EVT VT = Tmp1.getValueType(); 582 EVT EltVT = VT.getVectorElementType(); 583 EVT IdxVT = Tmp3.getValueType(); 584 EVT PtrVT = TLI.getPointerTy(); 585 SDValue StackPtr = DAG.CreateStackTemporary(VT); 586 587 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 588 589 // Store the vector. 590 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr, 591 MachinePointerInfo::getFixedStack(SPFI), 592 false, false, 0); 593 594 // Truncate or zero extend offset to target pointer type. 595 unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND; 596 Tmp3 = DAG.getNode(CastOpc, dl, PtrVT, Tmp3); 597 // Add the offset to the index. 
598 unsigned EltSize = EltVT.getSizeInBits()/8; 599 Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT)); 600 SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr); 601 // Store the scalar value. 602 Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, MachinePointerInfo(), EltVT, 603 false, false, 0); 604 // Load the updated vector. 605 return DAG.getLoad(VT, dl, Ch, StackPtr, 606 MachinePointerInfo::getFixedStack(SPFI), false, false, 607 false, 0); 608} 609 610 611SDValue SelectionDAGLegalize:: 612ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, DebugLoc dl) { 613 if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) { 614 // SCALAR_TO_VECTOR requires that the type of the value being inserted 615 // match the element type of the vector being created, except for 616 // integers in which case the inserted value can be over width. 617 EVT EltVT = Vec.getValueType().getVectorElementType(); 618 if (Val.getValueType() == EltVT || 619 (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) { 620 SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 621 Vec.getValueType(), Val); 622 623 unsigned NumElts = Vec.getValueType().getVectorNumElements(); 624 // We generate a shuffle of InVec and ScVec, so the shuffle mask 625 // should be 0,1,2,3,4,5... with the appropriate element replaced with 626 // elt 0 of the RHS. 627 SmallVector<int, 8> ShufOps; 628 for (unsigned i = 0; i != NumElts; ++i) 629 ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts); 630 631 return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec, 632 &ShufOps[0]); 633 } 634 } 635 return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl); 636} 637 638SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) { 639 // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr' 640 // FIXME: We shouldn't do this for TargetConstantFP's. 641 // FIXME: move this to the DAG Combiner! Note that we can't regress due 642 // to phase ordering between legalized code and the dag combiner. This 643 // probably means that we need to integrate dag combiner and legalizer 644 // together. 645 // We generally can't do this one for long doubles. 646 SDValue Chain = ST->getChain(); 647 SDValue Ptr = ST->getBasePtr(); 648 unsigned Alignment = ST->getAlignment(); 649 bool isVolatile = ST->isVolatile(); 650 bool isNonTemporal = ST->isNonTemporal(); 651 DebugLoc dl = ST->getDebugLoc(); 652 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) { 653 if (CFP->getValueType(0) == MVT::f32 && 654 TLI.isTypeLegal(MVT::i32)) { 655 SDValue Con = DAG.getConstant(CFP->getValueAPF(). 656 bitcastToAPInt().zextOrTrunc(32), 657 MVT::i32); 658 return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(), 659 isVolatile, isNonTemporal, Alignment); 660 } 661 662 if (CFP->getValueType(0) == MVT::f64) { 663 // If this target supports 64-bit registers, do a single 64-bit store. 664 if (TLI.isTypeLegal(MVT::i64)) { 665 SDValue Con = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt(). 666 zextOrTrunc(64), MVT::i64); 667 return DAG.getStore(Chain, dl, Con, Ptr, ST->getPointerInfo(), 668 isVolatile, isNonTemporal, Alignment); 669 } 670 671 if (TLI.isTypeLegal(MVT::i32) && !ST->isVolatile()) { 672 // Otherwise, if the target supports 32-bit registers, use 2 32-bit 673 // stores. If the target supports neither 32- nor 64-bits, this 674 // xform is certainly not worth it. 
675 const APInt &IntVal =CFP->getValueAPF().bitcastToAPInt(); 676 SDValue Lo = DAG.getConstant(IntVal.trunc(32), MVT::i32); 677 SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32); 678 if (TLI.isBigEndian()) std::swap(Lo, Hi); 679 680 Lo = DAG.getStore(Chain, dl, Lo, Ptr, ST->getPointerInfo(), isVolatile, 681 isNonTemporal, Alignment); 682 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 683 DAG.getIntPtrConstant(4)); 684 Hi = DAG.getStore(Chain, dl, Hi, Ptr, 685 ST->getPointerInfo().getWithOffset(4), 686 isVolatile, isNonTemporal, MinAlign(Alignment, 4U)); 687 688 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi); 689 } 690 } 691 } 692 return SDValue(0, 0); 693} 694 695void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) { 696 StoreSDNode *ST = cast<StoreSDNode>(Node); 697 SDValue Chain = ST->getChain(); 698 SDValue Ptr = ST->getBasePtr(); 699 DebugLoc dl = Node->getDebugLoc(); 700 701 unsigned Alignment = ST->getAlignment(); 702 bool isVolatile = ST->isVolatile(); 703 bool isNonTemporal = ST->isNonTemporal(); 704 705 if (!ST->isTruncatingStore()) { 706 if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) { 707 ReplaceNode(ST, OptStore); 708 return; 709 } 710 711 { 712 SDValue Value = ST->getValue(); 713 EVT VT = Value.getValueType(); 714 switch (TLI.getOperationAction(ISD::STORE, VT)) { 715 default: llvm_unreachable("This action is not supported yet!"); 716 case TargetLowering::Legal: 717 // If this is an unaligned store and the target doesn't support it, 718 // expand it. 719 if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { 720 Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); 721 unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty); 722 if (ST->getAlignment() < ABIAlignment) 723 ExpandUnalignedStore(cast<StoreSDNode>(Node), 724 DAG, TLI, this); 725 } 726 break; 727 case TargetLowering::Custom: { 728 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG); 729 if (Res.getNode()) 730 ReplaceNode(SDValue(Node, 0), Res); 731 return; 732 } 733 case TargetLowering::Promote: { 734 assert(VT.isVector() && "Unknown legal promote case!"); 735 Value = DAG.getNode(ISD::BITCAST, dl, 736 TLI.getTypeToPromoteTo(ISD::STORE, VT), Value); 737 SDValue Result = 738 DAG.getStore(Chain, dl, Value, Ptr, 739 ST->getPointerInfo(), isVolatile, 740 isNonTemporal, Alignment); 741 ReplaceNode(SDValue(Node, 0), Result); 742 break; 743 } 744 } 745 return; 746 } 747 } else { 748 SDValue Value = ST->getValue(); 749 750 EVT StVT = ST->getMemoryVT(); 751 unsigned StWidth = StVT.getSizeInBits(); 752 753 if (StWidth != StVT.getStoreSizeInBits()) { 754 // Promote to a byte-sized store with upper bits zero if not 755 // storing an integral number of bytes. For example, promote 756 // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1) 757 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), 758 StVT.getStoreSizeInBits()); 759 Value = DAG.getZeroExtendInReg(Value, dl, StVT); 760 SDValue Result = 761 DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(), 762 NVT, isVolatile, isNonTemporal, Alignment); 763 ReplaceNode(SDValue(Node, 0), Result); 764 } else if (StWidth & (StWidth - 1)) { 765 // If not storing a power-of-2 number of bits, expand as two stores. 
766 assert(!StVT.isVector() && "Unsupported truncstore!"); 767 unsigned RoundWidth = 1 << Log2_32(StWidth); 768 assert(RoundWidth < StWidth); 769 unsigned ExtraWidth = StWidth - RoundWidth; 770 assert(ExtraWidth < RoundWidth); 771 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && 772 "Store size not an integral number of bytes!"); 773 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth); 774 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth); 775 SDValue Lo, Hi; 776 unsigned IncrementSize; 777 778 if (TLI.isLittleEndian()) { 779 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16) 780 // Store the bottom RoundWidth bits. 781 Lo = DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(), 782 RoundVT, 783 isVolatile, isNonTemporal, Alignment); 784 785 // Store the remaining ExtraWidth bits. 786 IncrementSize = RoundWidth / 8; 787 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 788 DAG.getIntPtrConstant(IncrementSize)); 789 Hi = DAG.getNode(ISD::SRL, dl, Value.getValueType(), Value, 790 DAG.getConstant(RoundWidth, 791 TLI.getShiftAmountTy(Value.getValueType()))); 792 Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr, 793 ST->getPointerInfo().getWithOffset(IncrementSize), 794 ExtraVT, isVolatile, isNonTemporal, 795 MinAlign(Alignment, IncrementSize)); 796 } else { 797 // Big endian - avoid unaligned stores. 798 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X 799 // Store the top RoundWidth bits. 800 Hi = DAG.getNode(ISD::SRL, dl, Value.getValueType(), Value, 801 DAG.getConstant(ExtraWidth, 802 TLI.getShiftAmountTy(Value.getValueType()))); 803 Hi = DAG.getTruncStore(Chain, dl, Hi, Ptr, ST->getPointerInfo(), 804 RoundVT, isVolatile, isNonTemporal, Alignment); 805 806 // Store the remaining ExtraWidth bits. 807 IncrementSize = RoundWidth / 8; 808 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 809 DAG.getIntPtrConstant(IncrementSize)); 810 Lo = DAG.getTruncStore(Chain, dl, Value, Ptr, 811 ST->getPointerInfo().getWithOffset(IncrementSize), 812 ExtraVT, isVolatile, isNonTemporal, 813 MinAlign(Alignment, IncrementSize)); 814 } 815 816 // The order of the stores doesn't matter. 817 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi); 818 ReplaceNode(SDValue(Node, 0), Result); 819 } else { 820 switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) { 821 default: llvm_unreachable("This action is not supported yet!"); 822 case TargetLowering::Legal: 823 // If this is an unaligned store and the target doesn't support it, 824 // expand it. 
825 if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { 826 Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); 827 unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty); 828 if (ST->getAlignment() < ABIAlignment) 829 ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this); 830 } 831 break; 832 case TargetLowering::Custom: { 833 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG); 834 if (Res.getNode()) 835 ReplaceNode(SDValue(Node, 0), Res); 836 return; 837 } 838 case TargetLowering::Expand: 839 assert(!StVT.isVector() && 840 "Vector Stores are handled in LegalizeVectorOps"); 841 842 // TRUNCSTORE:i16 i32 -> STORE i16 843 assert(TLI.isTypeLegal(StVT) && 844 "Do not know how to expand this store!"); 845 Value = DAG.getNode(ISD::TRUNCATE, dl, StVT, Value); 846 SDValue Result = 847 DAG.getStore(Chain, dl, Value, Ptr, ST->getPointerInfo(), 848 isVolatile, isNonTemporal, Alignment); 849 ReplaceNode(SDValue(Node, 0), Result); 850 break; 851 } 852 } 853 } 854} 855 856void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) { 857 LoadSDNode *LD = cast<LoadSDNode>(Node); 858 SDValue Chain = LD->getChain(); // The chain. 859 SDValue Ptr = LD->getBasePtr(); // The base pointer. 860 SDValue Value; // The value returned by the load op. 861 DebugLoc dl = Node->getDebugLoc(); 862 863 ISD::LoadExtType ExtType = LD->getExtensionType(); 864 if (ExtType == ISD::NON_EXTLOAD) { 865 EVT VT = Node->getValueType(0); 866 SDValue RVal = SDValue(Node, 0); 867 SDValue RChain = SDValue(Node, 1); 868 869 switch (TLI.getOperationAction(Node->getOpcode(), VT)) { 870 default: llvm_unreachable("This action is not supported yet!"); 871 case TargetLowering::Legal: 872 // If this is an unaligned load and the target doesn't support it, 873 // expand it. 874 if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { 875 Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 876 unsigned ABIAlignment = 877 TLI.getDataLayout()->getABITypeAlignment(Ty); 878 if (LD->getAlignment() < ABIAlignment){ 879 ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, RVal, RChain); 880 } 881 } 882 break; 883 case TargetLowering::Custom: { 884 SDValue Res = TLI.LowerOperation(RVal, DAG); 885 if (Res.getNode()) { 886 RVal = Res; 887 RChain = Res.getValue(1); 888 } 889 break; 890 } 891 case TargetLowering::Promote: { 892 // Only promote a load of vector type to another. 893 assert(VT.isVector() && "Cannot promote this load!"); 894 // Change base type to a different vector type. 895 EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT); 896 897 SDValue Res = DAG.getLoad(NVT, dl, Chain, Ptr, LD->getPointerInfo(), 898 LD->isVolatile(), LD->isNonTemporal(), 899 LD->isInvariant(), LD->getAlignment()); 900 RVal = DAG.getNode(ISD::BITCAST, dl, VT, Res); 901 RChain = Res.getValue(1); 902 break; 903 } 904 } 905 if (RChain.getNode() != Node) { 906 assert(RVal.getNode() != Node && "Load must be completely replaced"); 907 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), RVal); 908 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), RChain); 909 ReplacedNode(Node); 910 } 911 return; 912 } 913 914 EVT SrcVT = LD->getMemoryVT(); 915 unsigned SrcWidth = SrcVT.getSizeInBits(); 916 unsigned Alignment = LD->getAlignment(); 917 bool isVolatile = LD->isVolatile(); 918 bool isNonTemporal = LD->isNonTemporal(); 919 920 if (SrcWidth != SrcVT.getStoreSizeInBits() && 921 // Some targets pretend to have an i1 loading operation, and actually 922 // load an i8. 
This trick is correct for ZEXTLOAD because the top 7 923 // bits are guaranteed to be zero; it helps the optimizers understand 924 // that these bits are zero. It is also useful for EXTLOAD, since it 925 // tells the optimizers that those bits are undefined. It would be 926 // nice to have an effective generic way of getting these benefits... 927 // Until such a way is found, don't insist on promoting i1 here. 928 (SrcVT != MVT::i1 || 929 TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) { 930 // Promote to a byte-sized load if not loading an integral number of 931 // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24. 932 unsigned NewWidth = SrcVT.getStoreSizeInBits(); 933 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth); 934 SDValue Ch; 935 936 // The extra bits are guaranteed to be zero, since we stored them that 937 // way. A zext load from NVT thus automatically gives zext from SrcVT. 938 939 ISD::LoadExtType NewExtType = 940 ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD; 941 942 SDValue Result = 943 DAG.getExtLoad(NewExtType, dl, Node->getValueType(0), 944 Chain, Ptr, LD->getPointerInfo(), 945 NVT, isVolatile, isNonTemporal, Alignment); 946 947 Ch = Result.getValue(1); // The chain. 948 949 if (ExtType == ISD::SEXTLOAD) 950 // Having the top bits zero doesn't help when sign extending. 951 Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, 952 Result.getValueType(), 953 Result, DAG.getValueType(SrcVT)); 954 else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType()) 955 // All the top bits are guaranteed to be zero - inform the optimizers. 956 Result = DAG.getNode(ISD::AssertZext, dl, 957 Result.getValueType(), Result, 958 DAG.getValueType(SrcVT)); 959 960 Value = Result; 961 Chain = Ch; 962 } else if (SrcWidth & (SrcWidth - 1)) { 963 // If not loading a power-of-2 number of bits, expand as two loads. 964 assert(!SrcVT.isVector() && "Unsupported extload!"); 965 unsigned RoundWidth = 1 << Log2_32(SrcWidth); 966 assert(RoundWidth < SrcWidth); 967 unsigned ExtraWidth = SrcWidth - RoundWidth; 968 assert(ExtraWidth < RoundWidth); 969 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && 970 "Load size not an integral number of bytes!"); 971 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth); 972 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth); 973 SDValue Lo, Hi, Ch; 974 unsigned IncrementSize; 975 976 if (TLI.isLittleEndian()) { 977 // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16) 978 // Load the bottom RoundWidth bits. 979 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0), 980 Chain, Ptr, 981 LD->getPointerInfo(), RoundVT, isVolatile, 982 isNonTemporal, Alignment); 983 984 // Load the remaining ExtraWidth bits. 985 IncrementSize = RoundWidth / 8; 986 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 987 DAG.getIntPtrConstant(IncrementSize)); 988 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr, 989 LD->getPointerInfo().getWithOffset(IncrementSize), 990 ExtraVT, isVolatile, isNonTemporal, 991 MinAlign(Alignment, IncrementSize)); 992 993 // Build a factor node to remember that this load is independent of 994 // the other one. 995 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 996 Hi.getValue(1)); 997 998 // Move the top bits to the right place. 999 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, 1000 DAG.getConstant(RoundWidth, 1001 TLI.getShiftAmountTy(Hi.getValueType()))); 1002 1003 // Join the hi and lo parts. 
1004 Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); 1005 } else { 1006 // Big endian - avoid unaligned loads. 1007 // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8 1008 // Load the top RoundWidth bits. 1009 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr, 1010 LD->getPointerInfo(), RoundVT, isVolatile, 1011 isNonTemporal, Alignment); 1012 1013 // Load the remaining ExtraWidth bits. 1014 IncrementSize = RoundWidth / 8; 1015 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 1016 DAG.getIntPtrConstant(IncrementSize)); 1017 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, 1018 dl, Node->getValueType(0), Chain, Ptr, 1019 LD->getPointerInfo().getWithOffset(IncrementSize), 1020 ExtraVT, isVolatile, isNonTemporal, 1021 MinAlign(Alignment, IncrementSize)); 1022 1023 // Build a factor node to remember that this load is independent of 1024 // the other one. 1025 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 1026 Hi.getValue(1)); 1027 1028 // Move the top bits to the right place. 1029 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, 1030 DAG.getConstant(ExtraWidth, 1031 TLI.getShiftAmountTy(Hi.getValueType()))); 1032 1033 // Join the hi and lo parts. 1034 Value = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); 1035 } 1036 1037 Chain = Ch; 1038 } else { 1039 bool isCustom = false; 1040 switch (TLI.getLoadExtAction(ExtType, SrcVT)) { 1041 default: llvm_unreachable("This action is not supported yet!"); 1042 case TargetLowering::Custom: 1043 isCustom = true; 1044 // FALLTHROUGH 1045 case TargetLowering::Legal: { 1046 Value = SDValue(Node, 0); 1047 Chain = SDValue(Node, 1); 1048 1049 if (isCustom) { 1050 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG); 1051 if (Res.getNode()) { 1052 Value = Res; 1053 Chain = Res.getValue(1); 1054 } 1055 } else { 1056 // If this is an unaligned load and the target doesn't support it, 1057 // expand it. 1058 if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { 1059 Type *Ty = 1060 LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1061 unsigned ABIAlignment = 1062 TLI.getDataLayout()->getABITypeAlignment(Ty); 1063 if (LD->getAlignment() < ABIAlignment){ 1064 ExpandUnalignedLoad(cast<LoadSDNode>(Node), 1065 DAG, TLI, Value, Chain); 1066 } 1067 } 1068 } 1069 break; 1070 } 1071 case TargetLowering::Expand: 1072 if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && TLI.isTypeLegal(SrcVT)) { 1073 SDValue Load = DAG.getLoad(SrcVT, dl, Chain, Ptr, 1074 LD->getPointerInfo(), 1075 LD->isVolatile(), LD->isNonTemporal(), 1076 LD->isInvariant(), LD->getAlignment()); 1077 unsigned ExtendOp; 1078 switch (ExtType) { 1079 case ISD::EXTLOAD: 1080 ExtendOp = (SrcVT.isFloatingPoint() ? 1081 ISD::FP_EXTEND : ISD::ANY_EXTEND); 1082 break; 1083 case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break; 1084 case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break; 1085 default: llvm_unreachable("Unexpected extend load type!"); 1086 } 1087 Value = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load); 1088 Chain = Load.getValue(1); 1089 break; 1090 } 1091 1092 assert(!SrcVT.isVector() && 1093 "Vector Loads are handled in LegalizeVectorOps"); 1094 1095 // FIXME: This does not work for vectors on most targets. Sign- and 1096 // zero-extend operations are currently folded into extending loads, 1097 // whether they are legal or not, and then we end up here without any 1098 // support for legalizing them. 
1099 assert(ExtType != ISD::EXTLOAD && 1100 "EXTLOAD should always be supported!"); 1101 // Turn the unsupported load into an EXTLOAD followed by an explicit 1102 // zero/sign extend inreg. 1103 SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0), 1104 Chain, Ptr, LD->getPointerInfo(), SrcVT, 1105 LD->isVolatile(), LD->isNonTemporal(), 1106 LD->getAlignment()); 1107 SDValue ValRes; 1108 if (ExtType == ISD::SEXTLOAD) 1109 ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, 1110 Result.getValueType(), 1111 Result, DAG.getValueType(SrcVT)); 1112 else 1113 ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType()); 1114 Value = ValRes; 1115 Chain = Result.getValue(1); 1116 break; 1117 } 1118 } 1119 1120 // Since loads produce two values, make sure to remember that we legalized 1121 // both of them. 1122 if (Chain.getNode() != Node) { 1123 assert(Value.getNode() != Node && "Load must be completely replaced"); 1124 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Value); 1125 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Chain); 1126 ReplacedNode(Node); 1127 } 1128} 1129 1130/// LegalizeOp - Return a legal replacement for the given operation, with 1131/// all legal operands. 1132void SelectionDAGLegalize::LegalizeOp(SDNode *Node) { 1133 if (Node->getOpcode() == ISD::TargetConstant) // Allow illegal target nodes. 1134 return; 1135 1136 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) 1137 assert(TLI.getTypeAction(*DAG.getContext(), Node->getValueType(i)) == 1138 TargetLowering::TypeLegal && 1139 "Unexpected illegal type!"); 1140 1141 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) 1142 assert((TLI.getTypeAction(*DAG.getContext(), 1143 Node->getOperand(i).getValueType()) == 1144 TargetLowering::TypeLegal || 1145 Node->getOperand(i).getOpcode() == ISD::TargetConstant) && 1146 "Unexpected illegal type!"); 1147 1148 // Figure out the correct action; the way to query this varies by opcode 1149 TargetLowering::LegalizeAction Action = TargetLowering::Legal; 1150 bool SimpleFinishLegalizing = true; 1151 switch (Node->getOpcode()) { 1152 case ISD::INTRINSIC_W_CHAIN: 1153 case ISD::INTRINSIC_WO_CHAIN: 1154 case ISD::INTRINSIC_VOID: 1155 case ISD::STACKSAVE: 1156 Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other); 1157 break; 1158 case ISD::VAARG: 1159 Action = TLI.getOperationAction(Node->getOpcode(), 1160 Node->getValueType(0)); 1161 if (Action != TargetLowering::Promote) 1162 Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other); 1163 break; 1164 case ISD::SINT_TO_FP: 1165 case ISD::UINT_TO_FP: 1166 case ISD::EXTRACT_VECTOR_ELT: 1167 Action = TLI.getOperationAction(Node->getOpcode(), 1168 Node->getOperand(0).getValueType()); 1169 break; 1170 case ISD::FP_ROUND_INREG: 1171 case ISD::SIGN_EXTEND_INREG: { 1172 EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT(); 1173 Action = TLI.getOperationAction(Node->getOpcode(), InnerType); 1174 break; 1175 } 1176 case ISD::ATOMIC_STORE: { 1177 Action = TLI.getOperationAction(Node->getOpcode(), 1178 Node->getOperand(2).getValueType()); 1179 break; 1180 } 1181 case ISD::SELECT_CC: 1182 case ISD::SETCC: 1183 case ISD::BR_CC: { 1184 unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 : 1185 Node->getOpcode() == ISD::SETCC ? 2 : 1; 1186 unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 
2 : 0; 1187 EVT OpVT = Node->getOperand(CompareOperand).getValueType(); 1188 ISD::CondCode CCCode = 1189 cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get(); 1190 Action = TLI.getCondCodeAction(CCCode, OpVT); 1191 if (Action == TargetLowering::Legal) { 1192 if (Node->getOpcode() == ISD::SELECT_CC) 1193 Action = TLI.getOperationAction(Node->getOpcode(), 1194 Node->getValueType(0)); 1195 else 1196 Action = TLI.getOperationAction(Node->getOpcode(), OpVT); 1197 } 1198 break; 1199 } 1200 case ISD::LOAD: 1201 case ISD::STORE: 1202 // FIXME: Model these properly. LOAD and STORE are complicated, and 1203 // STORE expects the unlegalized operand in some cases. 1204 SimpleFinishLegalizing = false; 1205 break; 1206 case ISD::CALLSEQ_START: 1207 case ISD::CALLSEQ_END: 1208 // FIXME: This shouldn't be necessary. These nodes have special properties 1209 // dealing with the recursive nature of legalization. Removing this 1210 // special case should be done as part of making LegalizeDAG non-recursive. 1211 SimpleFinishLegalizing = false; 1212 break; 1213 case ISD::EXTRACT_ELEMENT: 1214 case ISD::FLT_ROUNDS_: 1215 case ISD::SADDO: 1216 case ISD::SSUBO: 1217 case ISD::UADDO: 1218 case ISD::USUBO: 1219 case ISD::SMULO: 1220 case ISD::UMULO: 1221 case ISD::FPOWI: 1222 case ISD::MERGE_VALUES: 1223 case ISD::EH_RETURN: 1224 case ISD::FRAME_TO_ARGS_OFFSET: 1225 case ISD::EH_SJLJ_SETJMP: 1226 case ISD::EH_SJLJ_LONGJMP: 1227 // These operations lie about being legal: when they claim to be legal, 1228 // they should actually be expanded. 1229 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); 1230 if (Action == TargetLowering::Legal) 1231 Action = TargetLowering::Expand; 1232 break; 1233 case ISD::INIT_TRAMPOLINE: 1234 case ISD::ADJUST_TRAMPOLINE: 1235 case ISD::FRAMEADDR: 1236 case ISD::RETURNADDR: 1237 // These operations lie about being legal: when they claim to be legal, 1238 // they should actually be custom-lowered. 1239 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); 1240 if (Action == TargetLowering::Legal) 1241 Action = TargetLowering::Custom; 1242 break; 1243 default: 1244 if (Node->getOpcode() >= ISD::BUILTIN_OP_END) { 1245 Action = TargetLowering::Legal; 1246 } else { 1247 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); 1248 } 1249 break; 1250 } 1251 1252 if (SimpleFinishLegalizing) { 1253 SDNode *NewNode = Node; 1254 switch (Node->getOpcode()) { 1255 default: break; 1256 case ISD::SHL: 1257 case ISD::SRL: 1258 case ISD::SRA: 1259 case ISD::ROTL: 1260 case ISD::ROTR: 1261 // Legalizing shifts/rotates requires adjusting the shift amount 1262 // to the appropriate width. 1263 if (!Node->getOperand(1).getValueType().isVector()) { 1264 SDValue SAO = 1265 DAG.getShiftAmountOperand(Node->getOperand(0).getValueType(), 1266 Node->getOperand(1)); 1267 HandleSDNode Handle(SAO); 1268 LegalizeOp(SAO.getNode()); 1269 NewNode = DAG.UpdateNodeOperands(Node, Node->getOperand(0), 1270 Handle.getValue()); 1271 } 1272 break; 1273 case ISD::SRL_PARTS: 1274 case ISD::SRA_PARTS: 1275 case ISD::SHL_PARTS: 1276 // Legalizing shifts/rotates requires adjusting the shift amount 1277 // to the appropriate width. 
1278 if (!Node->getOperand(2).getValueType().isVector()) { 1279 SDValue SAO = 1280 DAG.getShiftAmountOperand(Node->getOperand(0).getValueType(), 1281 Node->getOperand(2)); 1282 HandleSDNode Handle(SAO); 1283 LegalizeOp(SAO.getNode()); 1284 NewNode = DAG.UpdateNodeOperands(Node, Node->getOperand(0), 1285 Node->getOperand(1), 1286 Handle.getValue()); 1287 } 1288 break; 1289 } 1290 1291 if (NewNode != Node) { 1292 DAG.ReplaceAllUsesWith(Node, NewNode); 1293 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) 1294 DAG.TransferDbgValues(SDValue(Node, i), SDValue(NewNode, i)); 1295 ReplacedNode(Node); 1296 Node = NewNode; 1297 } 1298 switch (Action) { 1299 case TargetLowering::Legal: 1300 return; 1301 case TargetLowering::Custom: { 1302 // FIXME: The handling for custom lowering with multiple results is 1303 // a complete mess. 1304 SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG); 1305 if (Res.getNode()) { 1306 SmallVector<SDValue, 8> ResultVals; 1307 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) { 1308 if (e == 1) 1309 ResultVals.push_back(Res); 1310 else 1311 ResultVals.push_back(Res.getValue(i)); 1312 } 1313 if (Res.getNode() != Node || Res.getResNo() != 0) { 1314 DAG.ReplaceAllUsesWith(Node, ResultVals.data()); 1315 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) 1316 DAG.TransferDbgValues(SDValue(Node, i), ResultVals[i]); 1317 ReplacedNode(Node); 1318 } 1319 return; 1320 } 1321 } 1322 // FALL THROUGH 1323 case TargetLowering::Expand: 1324 ExpandNode(Node); 1325 return; 1326 case TargetLowering::Promote: 1327 PromoteNode(Node); 1328 return; 1329 } 1330 } 1331 1332 switch (Node->getOpcode()) { 1333 default: 1334#ifndef NDEBUG 1335 dbgs() << "NODE: "; 1336 Node->dump( &DAG); 1337 dbgs() << "\n"; 1338#endif 1339 llvm_unreachable("Do not know how to legalize this operator!"); 1340 1341 case ISD::CALLSEQ_START: 1342 case ISD::CALLSEQ_END: 1343 break; 1344 case ISD::LOAD: { 1345 return LegalizeLoadOps(Node); 1346 } 1347 case ISD::STORE: { 1348 return LegalizeStoreOps(Node); 1349 } 1350 } 1351} 1352 1353SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) { 1354 SDValue Vec = Op.getOperand(0); 1355 SDValue Idx = Op.getOperand(1); 1356 DebugLoc dl = Op.getDebugLoc(); 1357 // Store the value to a temporary stack slot, then LOAD the returned part. 1358 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); 1359 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, 1360 MachinePointerInfo(), false, false, 0); 1361 1362 // Add the offset to the index. 
1363 unsigned EltSize = 1364 Vec.getValueType().getVectorElementType().getSizeInBits()/8; 1365 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, 1366 DAG.getConstant(EltSize, Idx.getValueType())); 1367 1368 if (Idx.getValueType().bitsGT(TLI.getPointerTy())) 1369 Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx); 1370 else 1371 Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx); 1372 1373 StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr); 1374 1375 if (Op.getValueType().isVector()) 1376 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr,MachinePointerInfo(), 1377 false, false, false, 0); 1378 return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr, 1379 MachinePointerInfo(), 1380 Vec.getValueType().getVectorElementType(), 1381 false, false, 0); 1382} 1383 1384SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) { 1385 assert(Op.getValueType().isVector() && "Non-vector insert subvector!"); 1386 1387 SDValue Vec = Op.getOperand(0); 1388 SDValue Part = Op.getOperand(1); 1389 SDValue Idx = Op.getOperand(2); 1390 DebugLoc dl = Op.getDebugLoc(); 1391 1392 // Store the value to a temporary stack slot, then LOAD the returned part. 1393 1394 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); 1395 int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 1396 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); 1397 1398 // First store the whole vector. 1399 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo, 1400 false, false, 0); 1401 1402 // Then store the inserted part. 1403 1404 // Add the offset to the index. 1405 unsigned EltSize = 1406 Vec.getValueType().getVectorElementType().getSizeInBits()/8; 1407 1408 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, 1409 DAG.getConstant(EltSize, Idx.getValueType())); 1410 1411 if (Idx.getValueType().bitsGT(TLI.getPointerTy())) 1412 Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx); 1413 else 1414 Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx); 1415 1416 SDValue SubStackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, 1417 StackPtr); 1418 1419 // Store the subvector. 1420 Ch = DAG.getStore(DAG.getEntryNode(), dl, Part, SubStackPtr, 1421 MachinePointerInfo(), false, false, 0); 1422 1423 // Finally, load the updated vector. 1424 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo, 1425 false, false, false, 0); 1426} 1427 1428SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) { 1429 // We can't handle this case efficiently. Allocate a sufficiently 1430 // aligned object on the stack, store each element into it, then load 1431 // the result as a vector. 1432 // Create the stack frame object. 1433 EVT VT = Node->getValueType(0); 1434 EVT EltVT = VT.getVectorElementType(); 1435 DebugLoc dl = Node->getDebugLoc(); 1436 SDValue FIPtr = DAG.CreateStackTemporary(VT); 1437 int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex(); 1438 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); 1439 1440 // Emit a store of each element to the stack slot. 1441 SmallVector<SDValue, 8> Stores; 1442 unsigned TypeByteSize = EltVT.getSizeInBits() / 8; 1443 // Store (in the right endianness) the elements to memory. 1444 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1445 // Ignore undef elements. 
1446 if (Node->getOperand(i).getOpcode() == ISD::UNDEF) continue; 1447 1448 unsigned Offset = TypeByteSize*i; 1449 1450 SDValue Idx = DAG.getConstant(Offset, FIPtr.getValueType()); 1451 Idx = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, Idx); 1452 1453 // If the destination vector element type is narrower than the source 1454 // element type, only store the bits necessary. 1455 if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) { 1456 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, 1457 Node->getOperand(i), Idx, 1458 PtrInfo.getWithOffset(Offset), 1459 EltVT, false, false, 0)); 1460 } else 1461 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, 1462 Node->getOperand(i), Idx, 1463 PtrInfo.getWithOffset(Offset), 1464 false, false, 0)); 1465 } 1466 1467 SDValue StoreChain; 1468 if (!Stores.empty()) // Not all undef elements? 1469 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1470 &Stores[0], Stores.size()); 1471 else 1472 StoreChain = DAG.getEntryNode(); 1473 1474 // Result is a load from the stack slot. 1475 return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo, 1476 false, false, false, 0); 1477} 1478 1479SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) { 1480 DebugLoc dl = Node->getDebugLoc(); 1481 SDValue Tmp1 = Node->getOperand(0); 1482 SDValue Tmp2 = Node->getOperand(1); 1483 1484 // Get the sign bit of the RHS. First obtain a value that has the same 1485 // sign as the sign bit, i.e. negative if and only if the sign bit is 1. 1486 SDValue SignBit; 1487 EVT FloatVT = Tmp2.getValueType(); 1488 EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits()); 1489 if (TLI.isTypeLegal(IVT)) { 1490 // Convert to an integer with the same sign bit. 1491 SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2); 1492 } else { 1493 // Store the float to memory, then load the sign part out as an integer. 1494 MVT LoadTy = TLI.getPointerTy(); 1495 // First create a temporary that is aligned for both the load and store. 1496 SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy); 1497 // Then store the float to it. 1498 SDValue Ch = 1499 DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, MachinePointerInfo(), 1500 false, false, 0); 1501 if (TLI.isBigEndian()) { 1502 assert(FloatVT.isByteSized() && "Unsupported floating point type!"); 1503 // Load out a legal integer with the same sign bit as the float. 1504 SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(), 1505 false, false, false, 0); 1506 } else { // Little endian 1507 SDValue LoadPtr = StackPtr; 1508 // The float may be wider than the integer we are going to load. Advance 1509 // the pointer so that the loaded integer will contain the sign bit. 1510 unsigned Strides = (FloatVT.getSizeInBits()-1)/LoadTy.getSizeInBits(); 1511 unsigned ByteOffset = (Strides * LoadTy.getSizeInBits()) / 8; 1512 LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(), 1513 LoadPtr, DAG.getIntPtrConstant(ByteOffset)); 1514 // Load a legal integer containing the sign bit. 1515 SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, MachinePointerInfo(), 1516 false, false, false, 0); 1517 // Move the sign bit to the top bit of the loaded integer. 
1518 unsigned BitShift = LoadTy.getSizeInBits() - 1519 (FloatVT.getSizeInBits() - 8 * ByteOffset); 1520 assert(BitShift < LoadTy.getSizeInBits() && "Pointer advanced wrong?"); 1521 if (BitShift) 1522 SignBit = DAG.getNode(ISD::SHL, dl, LoadTy, SignBit, 1523 DAG.getConstant(BitShift, 1524 TLI.getShiftAmountTy(SignBit.getValueType()))); 1525 } 1526 } 1527 // Now get the sign bit proper, by seeing whether the value is negative. 1528 SignBit = DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()), 1529 SignBit, DAG.getConstant(0, SignBit.getValueType()), 1530 ISD::SETLT); 1531 // Get the absolute value of the result. 1532 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1); 1533 // Select between the nabs and abs value based on the sign bit of 1534 // the input. 1535 return DAG.getNode(ISD::SELECT, dl, AbsVal.getValueType(), SignBit, 1536 DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal), 1537 AbsVal); 1538} 1539 1540void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node, 1541 SmallVectorImpl<SDValue> &Results) { 1542 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore(); 1543 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and" 1544 " not tell us which reg is the stack pointer!"); 1545 DebugLoc dl = Node->getDebugLoc(); 1546 EVT VT = Node->getValueType(0); 1547 SDValue Tmp1 = SDValue(Node, 0); 1548 SDValue Tmp2 = SDValue(Node, 1); 1549 SDValue Tmp3 = Node->getOperand(2); 1550 SDValue Chain = Tmp1.getOperand(0); 1551 1552 // Chain the dynamic stack allocation so that it doesn't modify the stack 1553 // pointer when other instructions are using the stack. 1554 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true)); 1555 1556 SDValue Size = Tmp2.getOperand(1); 1557 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); 1558 Chain = SP.getValue(1); 1559 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue(); 1560 unsigned StackAlign = TM.getFrameLowering()->getStackAlignment(); 1561 if (Align > StackAlign) 1562 SP = DAG.getNode(ISD::AND, dl, VT, SP, 1563 DAG.getConstant(-(uint64_t)Align, VT)); 1564 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value 1565 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain 1566 1567 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true), 1568 DAG.getIntPtrConstant(0, true), SDValue()); 1569 1570 Results.push_back(Tmp1); 1571 Results.push_back(Tmp2); 1572} 1573 1574/// LegalizeSetCCCondCode - Legalize a SETCC with given LHS and RHS and 1575/// condition code CC on the current target. This routine expands SETCC with 1576/// illegal condition code into AND / OR of multiple SETCC values. 1577void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, 1578 SDValue &LHS, SDValue &RHS, 1579 SDValue &CC, 1580 DebugLoc dl) { 1581 EVT OpVT = LHS.getValueType(); 1582 ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get(); 1583 switch (TLI.getCondCodeAction(CCCode, OpVT)) { 1584 default: llvm_unreachable("Unknown condition code action!"); 1585 case TargetLowering::Legal: 1586 // Nothing to do. 
1587 break; 1588 case TargetLowering::Expand: { 1589 ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID; 1590 ISD::CondCode InvCC = ISD::SETCC_INVALID; 1591 unsigned Opc = 0; 1592 switch (CCCode) { 1593 default: llvm_unreachable("Don't know how to expand this condition!"); 1594 case ISD::SETO: 1595 assert(TLI.getCondCodeAction(ISD::SETOEQ, OpVT) 1596 == TargetLowering::Legal 1597 && "If SETO is expanded, SETOEQ must be legal!"); 1598 CC1 = ISD::SETOEQ; CC2 = ISD::SETOEQ; Opc = ISD::AND; break; 1599 case ISD::SETUO: 1600 assert(TLI.getCondCodeAction(ISD::SETUNE, OpVT) 1601 == TargetLowering::Legal 1602 && "If SETUO is expanded, SETUNE must be legal!"); 1603 CC1 = ISD::SETUNE; CC2 = ISD::SETUNE; Opc = ISD::OR; break; 1604 case ISD::SETOEQ: 1605 case ISD::SETOGT: 1606 case ISD::SETOGE: 1607 case ISD::SETOLT: 1608 case ISD::SETOLE: 1609 case ISD::SETONE: 1610 case ISD::SETUEQ: 1611 case ISD::SETUNE: 1612 case ISD::SETUGT: 1613 case ISD::SETUGE: 1614 case ISD::SETULT: 1615 case ISD::SETULE: 1616 // If we are floating point, assign and break, otherwise fall through. 1617 if (!OpVT.isInteger()) { 1618 // We can use the 4th bit to tell if we are the unordered 1619 // or ordered version of the opcode. 1620 CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO; 1621 Opc = ((unsigned)CCCode & 0x8U) ? ISD::OR : ISD::AND; 1622 CC1 = (ISD::CondCode)(((int)CCCode & 0x7) | 0x10); 1623 break; 1624 } 1625 // Fallthrough if we are unsigned integer. 1626 case ISD::SETLE: 1627 case ISD::SETGT: 1628 case ISD::SETGE: 1629 case ISD::SETLT: 1630 case ISD::SETNE: 1631 case ISD::SETEQ: 1632 InvCC = ISD::getSetCCSwappedOperands(CCCode); 1633 if (TLI.getCondCodeAction(InvCC, OpVT) == TargetLowering::Expand) { 1634 // We only support using the inverted operation and not a 1635 // different manner of supporting expanding these cases. 1636 llvm_unreachable("Don't know how to expand this condition!"); 1637 } 1638 LHS = DAG.getSetCC(dl, VT, RHS, LHS, InvCC); 1639 RHS = SDValue(); 1640 CC = SDValue(); 1641 return; 1642 } 1643 1644 SDValue SetCC1, SetCC2; 1645 if (CCCode != ISD::SETO && CCCode != ISD::SETUO) { 1646 // If we aren't the ordered or unorder operation, 1647 // then the pattern is (LHS CC1 RHS) Opc (LHS CC2 RHS). 1648 SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1); 1649 SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2); 1650 } else { 1651 // Otherwise, the pattern is (LHS CC1 LHS) Opc (RHS CC2 RHS) 1652 SetCC1 = DAG.getSetCC(dl, VT, LHS, LHS, CC1); 1653 SetCC2 = DAG.getSetCC(dl, VT, RHS, RHS, CC2); 1654 } 1655 LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2); 1656 RHS = SDValue(); 1657 CC = SDValue(); 1658 break; 1659 } 1660 } 1661} 1662 1663/// EmitStackConvert - Emit a store/load combination to the stack. This stores 1664/// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does 1665/// a load from the stack slot to DestVT, extending it if needed. 1666/// The resultant code need not be legal. 1667SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, 1668 EVT SlotVT, 1669 EVT DestVT, 1670 DebugLoc dl) { 1671 // Create the stack frame object. 1672 unsigned SrcAlign = 1673 TLI.getDataLayout()->getPrefTypeAlignment(SrcOp.getValueType(). 
1674 getTypeForEVT(*DAG.getContext()));
1675 SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);
1676
1677 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
1678 int SPFI = StackPtrFI->getIndex();
1679 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI);
1680
1681 unsigned SrcSize = SrcOp.getValueType().getSizeInBits();
1682 unsigned SlotSize = SlotVT.getSizeInBits();
1683 unsigned DestSize = DestVT.getSizeInBits();
1684 Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
1685 unsigned DestAlign = TLI.getDataLayout()->getPrefTypeAlignment(DestType);
1686
1687 // Emit a store to the stack slot.  Use a truncstore if the input value is
1688 // larger than SlotVT.
1689 SDValue Store;
1690
1691 if (SrcSize > SlotSize)
1692 Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
1693 PtrInfo, SlotVT, false, false, SrcAlign);
1694 else {
1695 assert(SrcSize == SlotSize && "Invalid store");
1696 Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
1697 PtrInfo, false, false, SrcAlign);
1698 }
1699
1700 // Result is a load from the stack slot.
1701 if (SlotSize == DestSize)
1702 return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo,
1703 false, false, false, DestAlign);
1704
1705 assert(SlotSize < DestSize && "Unknown extension!");
1706 return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr,
1707 PtrInfo, SlotVT, false, false, DestAlign);
1708 }
1709
1710 SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) {
1711 DebugLoc dl = Node->getDebugLoc();
1712 // Create a vector sized/aligned stack slot, store the value to element #0,
1713 // then load the whole vector back out.
1714 SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0));
1715
1716 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr);
1717 int SPFI = StackPtrFI->getIndex();
1718
1719 SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0),
1720 StackPtr,
1721 MachinePointerInfo::getFixedStack(SPFI),
1722 Node->getValueType(0).getVectorElementType(),
1723 false, false, 0);
1724 return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr,
1725 MachinePointerInfo::getFixedStack(SPFI),
1726 false, false, false, 0);
1727 }
1728
1729
1730 /// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't
1731 /// support the operation, but do support the resultant vector type.
1732 SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
1733 unsigned NumElems = Node->getNumOperands();
1734 SDValue Value1, Value2;
1735 DebugLoc dl = Node->getDebugLoc();
1736 EVT VT = Node->getValueType(0);
1737 EVT OpVT = Node->getOperand(0).getValueType();
1738 EVT EltVT = VT.getVectorElementType();
1739
1740 // If the only non-undef value is the low element, turn this into a
1741 // SCALAR_TO_VECTOR node.  If this is { X, X, X, X }, determine X.
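// Illustration (hypothetical element values): <X, undef, undef, undef> becomes
// a SCALAR_TO_VECTOR, <X, X, Y, X> has exactly two distinct values and can be
// built as a shuffle of two SCALAR_TO_VECTOR nodes, and three or more distinct
// non-constant values fall through to the stack-based expansion at the end.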
1742 bool isOnlyLowElement = true; 1743 bool MoreThanTwoValues = false; 1744 bool isConstant = true; 1745 for (unsigned i = 0; i < NumElems; ++i) { 1746 SDValue V = Node->getOperand(i); 1747 if (V.getOpcode() == ISD::UNDEF) 1748 continue; 1749 if (i > 0) 1750 isOnlyLowElement = false; 1751 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 1752 isConstant = false; 1753 1754 if (!Value1.getNode()) { 1755 Value1 = V; 1756 } else if (!Value2.getNode()) { 1757 if (V != Value1) 1758 Value2 = V; 1759 } else if (V != Value1 && V != Value2) { 1760 MoreThanTwoValues = true; 1761 } 1762 } 1763 1764 if (!Value1.getNode()) 1765 return DAG.getUNDEF(VT); 1766 1767 if (isOnlyLowElement) 1768 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0)); 1769 1770 // If all elements are constants, create a load from the constant pool. 1771 if (isConstant) { 1772 SmallVector<Constant*, 16> CV; 1773 for (unsigned i = 0, e = NumElems; i != e; ++i) { 1774 if (ConstantFPSDNode *V = 1775 dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) { 1776 CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue())); 1777 } else if (ConstantSDNode *V = 1778 dyn_cast<ConstantSDNode>(Node->getOperand(i))) { 1779 if (OpVT==EltVT) 1780 CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue())); 1781 else { 1782 // If OpVT and EltVT don't match, EltVT is not legal and the 1783 // element values have been promoted/truncated earlier. Undo this; 1784 // we don't want a v16i8 to become a v16i32 for example. 1785 const ConstantInt *CI = V->getConstantIntValue(); 1786 CV.push_back(ConstantInt::get(EltVT.getTypeForEVT(*DAG.getContext()), 1787 CI->getZExtValue())); 1788 } 1789 } else { 1790 assert(Node->getOperand(i).getOpcode() == ISD::UNDEF); 1791 Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext()); 1792 CV.push_back(UndefValue::get(OpNTy)); 1793 } 1794 } 1795 Constant *CP = ConstantVector::get(CV); 1796 SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy()); 1797 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 1798 return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 1799 MachinePointerInfo::getConstantPool(), 1800 false, false, false, Alignment); 1801 } 1802 1803 if (!MoreThanTwoValues) { 1804 SmallVector<int, 8> ShuffleVec(NumElems, -1); 1805 for (unsigned i = 0; i < NumElems; ++i) { 1806 SDValue V = Node->getOperand(i); 1807 if (V.getOpcode() == ISD::UNDEF) 1808 continue; 1809 ShuffleVec[i] = V == Value1 ? 0 : NumElems; 1810 } 1811 if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) { 1812 // Get the splatted value into the low element of a vector register. 1813 SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1); 1814 SDValue Vec2; 1815 if (Value2.getNode()) 1816 Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2); 1817 else 1818 Vec2 = DAG.getUNDEF(VT); 1819 1820 // Return shuffle(LowValVec, undef, <0,0,0,0>) 1821 return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data()); 1822 } 1823 } 1824 1825 // Otherwise, we can't handle this case efficiently. 1826 return ExpandVectorBuildThroughStack(Node); 1827} 1828 1829// ExpandLibCall - Expand a node into a call to a libcall. If the result value 1830// does not fit into a register, return the lo part and set the hi part to the 1831// by-reg argument. If it does fit into a single register, return the result 1832// and leave the Hi part unset. 
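// For example (illustrative, assuming the default libcall names): expanding an
// f64 FSIN node through this helper emits a call to "sin" with the node's
// single operand as the argument; isSigned only controls the sext/zext
// attributes attached to integer arguments.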
1833SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, 1834 bool isSigned) { 1835 TargetLowering::ArgListTy Args; 1836 TargetLowering::ArgListEntry Entry; 1837 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1838 EVT ArgVT = Node->getOperand(i).getValueType(); 1839 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 1840 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 1841 Entry.isSExt = isSigned; 1842 Entry.isZExt = !isSigned; 1843 Args.push_back(Entry); 1844 } 1845 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1846 TLI.getPointerTy()); 1847 1848 Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 1849 1850 // By default, the input chain to this libcall is the entry node of the 1851 // function. If the libcall is going to be emitted as a tail call then 1852 // TLI.isUsedByReturnOnly will change it to the right chain if the return 1853 // node which is being folded has a non-entry input chain. 1854 SDValue InChain = DAG.getEntryNode(); 1855 1856 // isTailCall may be true since the callee does not reference caller stack 1857 // frame. Check if it's in the right position. 1858 SDValue TCChain = InChain; 1859 bool isTailCall = isInTailCallPosition(DAG, Node, TCChain, TLI); 1860 if (isTailCall) 1861 InChain = TCChain; 1862 1863 TargetLowering:: 1864 CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, false, 1865 0, TLI.getLibcallCallingConv(LC), isTailCall, 1866 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, 1867 Callee, Args, DAG, Node->getDebugLoc()); 1868 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI); 1869 1870 1871 if (!CallInfo.second.getNode()) 1872 // It's a tailcall, return the chain (which is the DAG root). 1873 return DAG.getRoot(); 1874 1875 return CallInfo.first; 1876} 1877 1878/// ExpandLibCall - Generate a libcall taking the given operands as arguments 1879/// and returning a result of type RetVT. 1880SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, 1881 const SDValue *Ops, unsigned NumOps, 1882 bool isSigned, DebugLoc dl) { 1883 TargetLowering::ArgListTy Args; 1884 Args.reserve(NumOps); 1885 1886 TargetLowering::ArgListEntry Entry; 1887 for (unsigned i = 0; i != NumOps; ++i) { 1888 Entry.Node = Ops[i]; 1889 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext()); 1890 Entry.isSExt = isSigned; 1891 Entry.isZExt = !isSigned; 1892 Args.push_back(Entry); 1893 } 1894 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1895 TLI.getPointerTy()); 1896 1897 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 1898 TargetLowering:: 1899 CallLoweringInfo CLI(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false, 1900 false, 0, TLI.getLibcallCallingConv(LC), 1901 /*isTailCall=*/false, 1902 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, 1903 Callee, Args, DAG, dl); 1904 std::pair<SDValue,SDValue> CallInfo = TLI.LowerCallTo(CLI); 1905 1906 return CallInfo.first; 1907} 1908 1909// ExpandChainLibCall - Expand a node into a call to a libcall. Similar to 1910// ExpandLibCall except that the first operand is the in-chain. 
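// For example (illustrative, assuming the default libcall names): an i32
// ATOMIC_LOAD_ADD routed here by ExpandAtomic uses LC = SYNC_FETCH_AND_ADD_4
// and becomes a call to __sync_fetch_and_add_4, with the node's incoming
// chain used as the call's chain rather than the entry node.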
1911std::pair<SDValue, SDValue> 1912SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC, 1913 SDNode *Node, 1914 bool isSigned) { 1915 SDValue InChain = Node->getOperand(0); 1916 1917 TargetLowering::ArgListTy Args; 1918 TargetLowering::ArgListEntry Entry; 1919 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) { 1920 EVT ArgVT = Node->getOperand(i).getValueType(); 1921 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 1922 Entry.Node = Node->getOperand(i); 1923 Entry.Ty = ArgTy; 1924 Entry.isSExt = isSigned; 1925 Entry.isZExt = !isSigned; 1926 Args.push_back(Entry); 1927 } 1928 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1929 TLI.getPointerTy()); 1930 1931 Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 1932 TargetLowering:: 1933 CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, false, 1934 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false, 1935 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, 1936 Callee, Args, DAG, Node->getDebugLoc()); 1937 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI); 1938 1939 return CallInfo; 1940} 1941 1942SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node, 1943 RTLIB::Libcall Call_F32, 1944 RTLIB::Libcall Call_F64, 1945 RTLIB::Libcall Call_F80, 1946 RTLIB::Libcall Call_PPCF128) { 1947 RTLIB::Libcall LC; 1948 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1949 default: llvm_unreachable("Unexpected request for libcall!"); 1950 case MVT::f32: LC = Call_F32; break; 1951 case MVT::f64: LC = Call_F64; break; 1952 case MVT::f80: LC = Call_F80; break; 1953 case MVT::ppcf128: LC = Call_PPCF128; break; 1954 } 1955 return ExpandLibCall(LC, Node, false); 1956} 1957 1958SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned, 1959 RTLIB::Libcall Call_I8, 1960 RTLIB::Libcall Call_I16, 1961 RTLIB::Libcall Call_I32, 1962 RTLIB::Libcall Call_I64, 1963 RTLIB::Libcall Call_I128) { 1964 RTLIB::Libcall LC; 1965 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1966 default: llvm_unreachable("Unexpected request for libcall!"); 1967 case MVT::i8: LC = Call_I8; break; 1968 case MVT::i16: LC = Call_I16; break; 1969 case MVT::i32: LC = Call_I32; break; 1970 case MVT::i64: LC = Call_I64; break; 1971 case MVT::i128: LC = Call_I128; break; 1972 } 1973 return ExpandLibCall(LC, Node, isSigned); 1974} 1975 1976/// isDivRemLibcallAvailable - Return true if divmod libcall is available. 1977static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned, 1978 const TargetLowering &TLI) { 1979 RTLIB::Libcall LC; 1980 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1981 default: llvm_unreachable("Unexpected request for libcall!"); 1982 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 1983 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 1984 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 1985 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 1986 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 1987 } 1988 1989 return TLI.getLibcallName(LC) != 0; 1990} 1991 1992/// useDivRem - Only issue divrem libcall if both quotient and remainder are 1993/// needed. 1994static bool useDivRem(SDNode *Node, bool isSigned, bool isDIV) { 1995 // The other use might have been replaced with a divrem already. 1996 unsigned DivRemOpc = isSigned ? 
ISD::SDIVREM : ISD::UDIVREM; 1997 unsigned OtherOpcode = 0; 1998 if (isSigned) 1999 OtherOpcode = isDIV ? ISD::SREM : ISD::SDIV; 2000 else 2001 OtherOpcode = isDIV ? ISD::UREM : ISD::UDIV; 2002 2003 SDValue Op0 = Node->getOperand(0); 2004 SDValue Op1 = Node->getOperand(1); 2005 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(), 2006 UE = Op0.getNode()->use_end(); UI != UE; ++UI) { 2007 SDNode *User = *UI; 2008 if (User == Node) 2009 continue; 2010 if ((User->getOpcode() == OtherOpcode || User->getOpcode() == DivRemOpc) && 2011 User->getOperand(0) == Op0 && 2012 User->getOperand(1) == Op1) 2013 return true; 2014 } 2015 return false; 2016} 2017 2018/// ExpandDivRemLibCall - Issue libcalls to __{u}divmod to compute div / rem 2019/// pairs. 2020void 2021SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node, 2022 SmallVectorImpl<SDValue> &Results) { 2023 unsigned Opcode = Node->getOpcode(); 2024 bool isSigned = Opcode == ISD::SDIVREM; 2025 2026 RTLIB::Libcall LC; 2027 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 2028 default: llvm_unreachable("Unexpected request for libcall!"); 2029 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 2030 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 2031 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 2032 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 2033 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 2034 } 2035 2036 // The input chain to this libcall is the entry node of the function. 2037 // Legalizing the call will automatically add the previous call to the 2038 // dependence. 2039 SDValue InChain = DAG.getEntryNode(); 2040 2041 EVT RetVT = Node->getValueType(0); 2042 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 2043 2044 TargetLowering::ArgListTy Args; 2045 TargetLowering::ArgListEntry Entry; 2046 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 2047 EVT ArgVT = Node->getOperand(i).getValueType(); 2048 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2049 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 2050 Entry.isSExt = isSigned; 2051 Entry.isZExt = !isSigned; 2052 Args.push_back(Entry); 2053 } 2054 2055 // Also pass the return address of the remainder. 2056 SDValue FIPtr = DAG.CreateStackTemporary(RetVT); 2057 Entry.Node = FIPtr; 2058 Entry.Ty = RetTy->getPointerTo(); 2059 Entry.isSExt = isSigned; 2060 Entry.isZExt = !isSigned; 2061 Args.push_back(Entry); 2062 2063 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2064 TLI.getPointerTy()); 2065 2066 DebugLoc dl = Node->getDebugLoc(); 2067 TargetLowering:: 2068 CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, false, 2069 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false, 2070 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, 2071 Callee, Args, DAG, dl); 2072 std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI); 2073 2074 // Remainder is loaded back from the stack frame. 2075 SDValue Rem = DAG.getLoad(RetVT, dl, CallInfo.second, FIPtr, 2076 MachinePointerInfo(), false, false, false, 0); 2077 Results.push_back(CallInfo.first); 2078 Results.push_back(Rem); 2079} 2080 2081/// ExpandLegalINT_TO_FP - This function is responsible for legalizing a 2082/// INT_TO_FP operation of the specified operand when the target requests that 2083/// we expand it. At this point, we know that the result and operand types are 2084/// legal for the target. 
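/// A sketch of the i32 path handled first below (assuming the usual IEEE-754
/// double layout): storing 0x43300000 as the high word and the (possibly
/// sign-flipped) integer as the low word constructs the double 2^52 + x', and
/// subtracting the matching bias (2^52 for unsigned, 2^52 + 2^31 for signed)
/// leaves exactly the original integer value as an f64.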
2085SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, 2086 SDValue Op0, 2087 EVT DestVT, 2088 DebugLoc dl) { 2089 if (Op0.getValueType() == MVT::i32 && TLI.isTypeLegal(MVT::f64)) { 2090 // simple 32-bit [signed|unsigned] integer to float/double expansion 2091 2092 // Get the stack frame index of a 8 byte buffer. 2093 SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64); 2094 2095 // word offset constant for Hi/Lo address computation 2096 SDValue WordOff = DAG.getConstant(sizeof(int), TLI.getPointerTy()); 2097 // set up Hi and Lo (into buffer) address based on endian 2098 SDValue Hi = StackSlot; 2099 SDValue Lo = DAG.getNode(ISD::ADD, dl, 2100 TLI.getPointerTy(), StackSlot, WordOff); 2101 if (TLI.isLittleEndian()) 2102 std::swap(Hi, Lo); 2103 2104 // if signed map to unsigned space 2105 SDValue Op0Mapped; 2106 if (isSigned) { 2107 // constant used to invert sign bit (signed to unsigned mapping) 2108 SDValue SignBit = DAG.getConstant(0x80000000u, MVT::i32); 2109 Op0Mapped = DAG.getNode(ISD::XOR, dl, MVT::i32, Op0, SignBit); 2110 } else { 2111 Op0Mapped = Op0; 2112 } 2113 // store the lo of the constructed double - based on integer input 2114 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, 2115 Op0Mapped, Lo, MachinePointerInfo(), 2116 false, false, 0); 2117 // initial hi portion of constructed double 2118 SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32); 2119 // store the hi of the constructed double - biased exponent 2120 SDValue Store2 = DAG.getStore(Store1, dl, InitialHi, Hi, 2121 MachinePointerInfo(), 2122 false, false, 0); 2123 // load the constructed double 2124 SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot, 2125 MachinePointerInfo(), false, false, false, 0); 2126 // FP constant to bias correct the final result 2127 SDValue Bias = DAG.getConstantFP(isSigned ? 2128 BitsToDouble(0x4330000080000000ULL) : 2129 BitsToDouble(0x4330000000000000ULL), 2130 MVT::f64); 2131 // subtract the bias 2132 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias); 2133 // final result 2134 SDValue Result; 2135 // handle final rounding 2136 if (DestVT == MVT::f64) { 2137 // do nothing 2138 Result = Sub; 2139 } else if (DestVT.bitsLT(MVT::f64)) { 2140 Result = DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 2141 DAG.getIntPtrConstant(0)); 2142 } else if (DestVT.bitsGT(MVT::f64)) { 2143 Result = DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 2144 } 2145 return Result; 2146 } 2147 assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet"); 2148 // Code below here assumes !isSigned without checking again. 2149 2150 // Implementation of unsigned i64 to f64 following the algorithm in 2151 // __floatundidf in compiler_rt. This implementation has the advantage 2152 // of performing rounding correctly, both in the default rounding mode 2153 // and in all alternate rounding modes. 2154 // TODO: Generalize this for use with other types. 
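// Rough sketch of the trick below: the low 32 bits are OR'd into the mantissa
// of 2^52 and the high 32 bits into the mantissa of 2^84, so after the
// bitcasts LoFlt == 2^52 + lo and HiFlt == 2^84 + hi*2^32, both exact;
// subtracting (2^84 + 2^52) and adding the two halves leaves hi*2^32 + lo
// with a single, correctly rounded FADD.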
2155 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f64) { 2156 SDValue TwoP52 = 2157 DAG.getConstant(UINT64_C(0x4330000000000000), MVT::i64); 2158 SDValue TwoP84PlusTwoP52 = 2159 DAG.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), MVT::f64); 2160 SDValue TwoP84 = 2161 DAG.getConstant(UINT64_C(0x4530000000000000), MVT::i64); 2162 2163 SDValue Lo = DAG.getZeroExtendInReg(Op0, dl, MVT::i32); 2164 SDValue Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, 2165 DAG.getConstant(32, MVT::i64)); 2166 SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52); 2167 SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84); 2168 SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr); 2169 SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr); 2170 SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt, 2171 TwoP84PlusTwoP52); 2172 return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub); 2173 } 2174 2175 // Implementation of unsigned i64 to f32. 2176 // TODO: Generalize this for use with other types. 2177 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) { 2178 // For unsigned conversions, convert them to signed conversions using the 2179 // algorithm from the x86_64 __floatundidf in compiler_rt. 2180 if (!isSigned) { 2181 SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0); 2182 2183 SDValue ShiftConst = 2184 DAG.getConstant(1, TLI.getShiftAmountTy(Op0.getValueType())); 2185 SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst); 2186 SDValue AndConst = DAG.getConstant(1, MVT::i64); 2187 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst); 2188 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr); 2189 2190 SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or); 2191 SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt); 2192 2193 // TODO: This really should be implemented using a branch rather than a 2194 // select. We happen to get lucky and machinesink does the right 2195 // thing most of the time. This would be a good candidate for a 2196 //pseudo-op, or, even better, for whole-function isel. 2197 SDValue SignBitTest = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2198 Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT); 2199 return DAG.getNode(ISD::SELECT, dl, MVT::f32, SignBitTest, Slow, Fast); 2200 } 2201 2202 // Otherwise, implement the fully general conversion. 
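// Informally: values of 2^53 or more are not exactly representable as an f64,
// so the code below clears the low 11 bits and ORs in a "sticky" bit 11
// whenever any of them were set.  The intermediate f64 is then exact and the
// final f64 -> f32 FP_ROUND performs the one correctly rounded step.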
2203 2204 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2205 DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64)); 2206 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, 2207 DAG.getConstant(UINT64_C(0x800), MVT::i64)); 2208 SDValue And2 = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2209 DAG.getConstant(UINT64_C(0x7ff), MVT::i64)); 2210 SDValue Ne = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2211 And2, DAG.getConstant(UINT64_C(0), MVT::i64), ISD::SETNE); 2212 SDValue Sel = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ne, Or, Op0); 2213 SDValue Ge = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2214 Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64), 2215 ISD::SETUGE); 2216 SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0); 2217 EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType()); 2218 2219 SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2, 2220 DAG.getConstant(32, SHVT)); 2221 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh); 2222 SDValue Fcvt = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Trunc); 2223 SDValue TwoP32 = 2224 DAG.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), MVT::f64); 2225 SDValue Fmul = DAG.getNode(ISD::FMUL, dl, MVT::f64, TwoP32, Fcvt); 2226 SDValue Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sel2); 2227 SDValue Fcvt2 = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Lo); 2228 SDValue Fadd = DAG.getNode(ISD::FADD, dl, MVT::f64, Fmul, Fcvt2); 2229 return DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Fadd, 2230 DAG.getIntPtrConstant(0)); 2231 } 2232 2233 SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0); 2234 2235 SDValue SignSet = DAG.getSetCC(dl, TLI.getSetCCResultType(Op0.getValueType()), 2236 Op0, DAG.getConstant(0, Op0.getValueType()), 2237 ISD::SETLT); 2238 SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4); 2239 SDValue CstOffset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), 2240 SignSet, Four, Zero); 2241 2242 // If the sign bit of the integer is set, the large number will be treated 2243 // as a negative number. To counteract this, the dynamic code adds an 2244 // offset depending on the data type. 
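// Example (illustrative): for an i32 input with its top bit set, the
// SINT_TO_FP above yields a result that is 2^32 too small, so 2^32
// (0x4F800000 as a float) is loaded from the two-slot constant built below;
// otherwise the selected offset points at the 0.0 half and nothing is added.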
2245 uint64_t FF; 2246 switch (Op0.getValueType().getSimpleVT().SimpleTy) { 2247 default: llvm_unreachable("Unsupported integer type!"); 2248 case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float) 2249 case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float) 2250 case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float) 2251 case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float) 2252 } 2253 if (TLI.isLittleEndian()) FF <<= 32; 2254 Constant *FudgeFactor = ConstantInt::get( 2255 Type::getInt64Ty(*DAG.getContext()), FF); 2256 2257 SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy()); 2258 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 2259 CPIdx = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), CPIdx, CstOffset); 2260 Alignment = std::min(Alignment, 4u); 2261 SDValue FudgeInReg; 2262 if (DestVT == MVT::f32) 2263 FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx, 2264 MachinePointerInfo::getConstantPool(), 2265 false, false, false, Alignment); 2266 else { 2267 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, 2268 DAG.getEntryNode(), CPIdx, 2269 MachinePointerInfo::getConstantPool(), 2270 MVT::f32, false, false, Alignment); 2271 HandleSDNode Handle(Load); 2272 LegalizeOp(Load.getNode()); 2273 FudgeInReg = Handle.getValue(); 2274 } 2275 2276 return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg); 2277} 2278 2279/// PromoteLegalINT_TO_FP - This function is responsible for legalizing a 2280/// *INT_TO_FP operation of the specified operand when the target requests that 2281/// we promote it. At this point, we know that the result and operand types are 2282/// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP 2283/// operation that takes a larger input. 2284SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp, 2285 EVT DestVT, 2286 bool isSigned, 2287 DebugLoc dl) { 2288 // First step, figure out the appropriate *INT_TO_FP operation to use. 2289 EVT NewInTy = LegalOp.getValueType(); 2290 2291 unsigned OpToUse = 0; 2292 2293 // Scan for the appropriate larger type to use. 2294 while (1) { 2295 NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1); 2296 assert(NewInTy.isInteger() && "Ran out of possibilities!"); 2297 2298 // If the target supports SINT_TO_FP of this type, use it. 2299 if (TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, NewInTy)) { 2300 OpToUse = ISD::SINT_TO_FP; 2301 break; 2302 } 2303 if (isSigned) continue; 2304 2305 // If the target supports UINT_TO_FP of this type, use it. 2306 if (TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, NewInTy)) { 2307 OpToUse = ISD::UINT_TO_FP; 2308 break; 2309 } 2310 2311 // Otherwise, try a larger type. 2312 } 2313 2314 // Okay, we found the operation and type to use. Zero extend our input to the 2315 // desired type then run the operation on it. 2316 return DAG.getNode(OpToUse, dl, DestVT, 2317 DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, 2318 dl, NewInTy, LegalOp)); 2319} 2320 2321/// PromoteLegalFP_TO_INT - This function is responsible for legalizing a 2322/// FP_TO_*INT operation of the specified operand when the target requests that 2323/// we promote it. At this point, we know that the result and operand types are 2324/// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT 2325/// operation that returns a larger result. 
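/// For example (illustrative): an FP_TO_UINT producing i16 on a target whose
/// only legal choice is FP_TO_SINT to i32 is promoted to that operation, and
/// the i32 result is truncated back to i16 afterwards.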
2326SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp, 2327 EVT DestVT, 2328 bool isSigned, 2329 DebugLoc dl) { 2330 // First step, figure out the appropriate FP_TO*INT operation to use. 2331 EVT NewOutTy = DestVT; 2332 2333 unsigned OpToUse = 0; 2334 2335 // Scan for the appropriate larger type to use. 2336 while (1) { 2337 NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1); 2338 assert(NewOutTy.isInteger() && "Ran out of possibilities!"); 2339 2340 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) { 2341 OpToUse = ISD::FP_TO_SINT; 2342 break; 2343 } 2344 2345 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) { 2346 OpToUse = ISD::FP_TO_UINT; 2347 break; 2348 } 2349 2350 // Otherwise, try a larger type. 2351 } 2352 2353 2354 // Okay, we found the operation and type to use. 2355 SDValue Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp); 2356 2357 // Truncate the result of the extended FP_TO_*INT operation to the desired 2358 // size. 2359 return DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation); 2360} 2361 2362/// ExpandBSWAP - Open code the operations for BSWAP of the specified operation. 2363/// 2364SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) { 2365 EVT VT = Op.getValueType(); 2366 EVT SHVT = TLI.getShiftAmountTy(VT); 2367 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; 2368 switch (VT.getSimpleVT().SimpleTy) { 2369 default: llvm_unreachable("Unhandled Expand type in BSWAP!"); 2370 case MVT::i16: 2371 Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2372 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2373 return DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2374 case MVT::i32: 2375 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2376 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2377 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2378 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2379 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(0xFF0000, VT)); 2380 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, VT)); 2381 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2382 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2383 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 2384 case MVT::i64: 2385 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2386 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2387 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2388 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2389 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2390 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2391 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2392 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2393 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, DAG.getConstant(255ULL<<48, VT)); 2394 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, DAG.getConstant(255ULL<<40, VT)); 2395 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, DAG.getConstant(255ULL<<32, VT)); 2396 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, DAG.getConstant(255ULL<<24, VT)); 2397 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(255ULL<<16, VT)); 2398 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(255ULL<<8 , VT)); 2399 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7); 2400 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, 
Tmp5); 2401 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2402 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2403 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6); 2404 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 2405 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4); 2406 } 2407} 2408 2409/// SplatByte - Distribute ByteVal over NumBits bits. 2410// FIXME: Move this helper to a common place. 2411static APInt SplatByte(unsigned NumBits, uint8_t ByteVal) { 2412 APInt Val = APInt(NumBits, ByteVal); 2413 unsigned Shift = 8; 2414 for (unsigned i = NumBits; i > 8; i >>= 1) { 2415 Val = (Val << Shift) | Val; 2416 Shift <<= 1; 2417 } 2418 return Val; 2419} 2420 2421/// ExpandBitCount - Expand the specified bitcount instruction into operations. 2422/// 2423SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op, 2424 DebugLoc dl) { 2425 switch (Opc) { 2426 default: llvm_unreachable("Cannot expand this yet!"); 2427 case ISD::CTPOP: { 2428 EVT VT = Op.getValueType(); 2429 EVT ShVT = TLI.getShiftAmountTy(VT); 2430 unsigned Len = VT.getSizeInBits(); 2431 2432 assert(VT.isInteger() && Len <= 128 && Len % 8 == 0 && 2433 "CTPOP not implemented for this type."); 2434 2435 // This is the "best" algorithm from 2436 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 2437 2438 SDValue Mask55 = DAG.getConstant(SplatByte(Len, 0x55), VT); 2439 SDValue Mask33 = DAG.getConstant(SplatByte(Len, 0x33), VT); 2440 SDValue Mask0F = DAG.getConstant(SplatByte(Len, 0x0F), VT); 2441 SDValue Mask01 = DAG.getConstant(SplatByte(Len, 0x01), VT); 2442 2443 // v = v - ((v >> 1) & 0x55555555...) 2444 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 2445 DAG.getNode(ISD::AND, dl, VT, 2446 DAG.getNode(ISD::SRL, dl, VT, Op, 2447 DAG.getConstant(1, ShVT)), 2448 Mask55)); 2449 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 2450 Op = DAG.getNode(ISD::ADD, dl, VT, 2451 DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 2452 DAG.getNode(ISD::AND, dl, VT, 2453 DAG.getNode(ISD::SRL, dl, VT, Op, 2454 DAG.getConstant(2, ShVT)), 2455 Mask33)); 2456 // v = (v + (v >> 4)) & 0x0F0F0F0F... 2457 Op = DAG.getNode(ISD::AND, dl, VT, 2458 DAG.getNode(ISD::ADD, dl, VT, Op, 2459 DAG.getNode(ISD::SRL, dl, VT, Op, 2460 DAG.getConstant(4, ShVT))), 2461 Mask0F); 2462 // v = (v * 0x01010101...) >> (Len - 8) 2463 Op = DAG.getNode(ISD::SRL, dl, VT, 2464 DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 2465 DAG.getConstant(Len - 8, ShVT)); 2466 2467 return Op; 2468 } 2469 case ISD::CTLZ_ZERO_UNDEF: 2470 // This trivially expands to CTLZ. 2471 return DAG.getNode(ISD::CTLZ, dl, Op.getValueType(), Op); 2472 case ISD::CTLZ: { 2473 // for now, we do this: 2474 // x = x | (x >> 1); 2475 // x = x | (x >> 2); 2476 // ... 2477 // x = x | (x >>16); 2478 // x = x | (x >>32); // for 64-bit input 2479 // return popcount(~x); 2480 // 2481 // but see also: http://www.hackersdelight.org/HDcode/nlz.cc 2482 EVT VT = Op.getValueType(); 2483 EVT ShVT = TLI.getShiftAmountTy(VT); 2484 unsigned len = VT.getSizeInBits(); 2485 for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { 2486 SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT); 2487 Op = DAG.getNode(ISD::OR, dl, VT, Op, 2488 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3)); 2489 } 2490 Op = DAG.getNOT(dl, Op, VT); 2491 return DAG.getNode(ISD::CTPOP, dl, VT, Op); 2492 } 2493 case ISD::CTTZ_ZERO_UNDEF: 2494 // This trivially expands to CTTZ. 
2495 return DAG.getNode(ISD::CTTZ, dl, Op.getValueType(), Op); 2496 case ISD::CTTZ: { 2497 // for now, we use: { return popcount(~x & (x - 1)); } 2498 // unless the target has ctlz but not ctpop, in which case we use: 2499 // { return 32 - nlz(~x & (x-1)); } 2500 // see also http://www.hackersdelight.org/HDcode/ntz.cc 2501 EVT VT = Op.getValueType(); 2502 SDValue Tmp3 = DAG.getNode(ISD::AND, dl, VT, 2503 DAG.getNOT(dl, Op, VT), 2504 DAG.getNode(ISD::SUB, dl, VT, Op, 2505 DAG.getConstant(1, VT))); 2506 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 2507 if (!TLI.isOperationLegalOrCustom(ISD::CTPOP, VT) && 2508 TLI.isOperationLegalOrCustom(ISD::CTLZ, VT)) 2509 return DAG.getNode(ISD::SUB, dl, VT, 2510 DAG.getConstant(VT.getSizeInBits(), VT), 2511 DAG.getNode(ISD::CTLZ, dl, VT, Tmp3)); 2512 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp3); 2513 } 2514 } 2515} 2516 2517std::pair <SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) { 2518 unsigned Opc = Node->getOpcode(); 2519 MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT(); 2520 RTLIB::Libcall LC; 2521 2522 switch (Opc) { 2523 default: 2524 llvm_unreachable("Unhandled atomic intrinsic Expand!"); 2525 case ISD::ATOMIC_SWAP: 2526 switch (VT.SimpleTy) { 2527 default: llvm_unreachable("Unexpected value type for atomic!"); 2528 case MVT::i8: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_1; break; 2529 case MVT::i16: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_2; break; 2530 case MVT::i32: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_4; break; 2531 case MVT::i64: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_8; break; 2532 } 2533 break; 2534 case ISD::ATOMIC_CMP_SWAP: 2535 switch (VT.SimpleTy) { 2536 default: llvm_unreachable("Unexpected value type for atomic!"); 2537 case MVT::i8: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1; break; 2538 case MVT::i16: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2; break; 2539 case MVT::i32: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4; break; 2540 case MVT::i64: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8; break; 2541 } 2542 break; 2543 case ISD::ATOMIC_LOAD_ADD: 2544 switch (VT.SimpleTy) { 2545 default: llvm_unreachable("Unexpected value type for atomic!"); 2546 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_ADD_1; break; 2547 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_ADD_2; break; 2548 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_ADD_4; break; 2549 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_ADD_8; break; 2550 } 2551 break; 2552 case ISD::ATOMIC_LOAD_SUB: 2553 switch (VT.SimpleTy) { 2554 default: llvm_unreachable("Unexpected value type for atomic!"); 2555 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_SUB_1; break; 2556 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_SUB_2; break; 2557 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_SUB_4; break; 2558 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_SUB_8; break; 2559 } 2560 break; 2561 case ISD::ATOMIC_LOAD_AND: 2562 switch (VT.SimpleTy) { 2563 default: llvm_unreachable("Unexpected value type for atomic!"); 2564 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_AND_1; break; 2565 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_AND_2; break; 2566 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_AND_4; break; 2567 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_AND_8; break; 2568 } 2569 break; 2570 case ISD::ATOMIC_LOAD_OR: 2571 switch (VT.SimpleTy) { 2572 default: llvm_unreachable("Unexpected value type for atomic!"); 2573 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_OR_1; break; 2574 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_OR_2; break; 2575 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_OR_4; break; 2576 case MVT::i64: LC = 
RTLIB::SYNC_FETCH_AND_OR_8; break; 2577 } 2578 break; 2579 case ISD::ATOMIC_LOAD_XOR: 2580 switch (VT.SimpleTy) { 2581 default: llvm_unreachable("Unexpected value type for atomic!"); 2582 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_XOR_1; break; 2583 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_XOR_2; break; 2584 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_XOR_4; break; 2585 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_XOR_8; break; 2586 } 2587 break; 2588 case ISD::ATOMIC_LOAD_NAND: 2589 switch (VT.SimpleTy) { 2590 default: llvm_unreachable("Unexpected value type for atomic!"); 2591 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_NAND_1; break; 2592 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_NAND_2; break; 2593 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_NAND_4; break; 2594 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_NAND_8; break; 2595 } 2596 break; 2597 } 2598 2599 return ExpandChainLibCall(LC, Node, false); 2600} 2601 2602void SelectionDAGLegalize::ExpandNode(SDNode *Node) { 2603 SmallVector<SDValue, 8> Results; 2604 DebugLoc dl = Node->getDebugLoc(); 2605 SDValue Tmp1, Tmp2, Tmp3, Tmp4; 2606 switch (Node->getOpcode()) { 2607 case ISD::CTPOP: 2608 case ISD::CTLZ: 2609 case ISD::CTLZ_ZERO_UNDEF: 2610 case ISD::CTTZ: 2611 case ISD::CTTZ_ZERO_UNDEF: 2612 Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl); 2613 Results.push_back(Tmp1); 2614 break; 2615 case ISD::BSWAP: 2616 Results.push_back(ExpandBSWAP(Node->getOperand(0), dl)); 2617 break; 2618 case ISD::FRAMEADDR: 2619 case ISD::RETURNADDR: 2620 case ISD::FRAME_TO_ARGS_OFFSET: 2621 Results.push_back(DAG.getConstant(0, Node->getValueType(0))); 2622 break; 2623 case ISD::FLT_ROUNDS_: 2624 Results.push_back(DAG.getConstant(1, Node->getValueType(0))); 2625 break; 2626 case ISD::EH_RETURN: 2627 case ISD::EH_LABEL: 2628 case ISD::PREFETCH: 2629 case ISD::VAEND: 2630 case ISD::EH_SJLJ_LONGJMP: 2631 // If the target didn't expand these, there's nothing to do, so just 2632 // preserve the chain and be done. 2633 Results.push_back(Node->getOperand(0)); 2634 break; 2635 case ISD::EH_SJLJ_SETJMP: 2636 // If the target didn't expand this, just return 'zero' and preserve the 2637 // chain. 2638 Results.push_back(DAG.getConstant(0, MVT::i32)); 2639 Results.push_back(Node->getOperand(0)); 2640 break; 2641 case ISD::ATOMIC_FENCE: 2642 case ISD::MEMBARRIER: { 2643 // If the target didn't lower this, lower it to '__sync_synchronize()' call 2644 // FIXME: handle "fence singlethread" more efficiently. 2645 TargetLowering::ArgListTy Args; 2646 TargetLowering:: 2647 CallLoweringInfo CLI(Node->getOperand(0), 2648 Type::getVoidTy(*DAG.getContext()), 2649 false, false, false, false, 0, CallingConv::C, 2650 /*isTailCall=*/false, 2651 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, 2652 DAG.getExternalSymbol("__sync_synchronize", 2653 TLI.getPointerTy()), 2654 Args, DAG, dl); 2655 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI); 2656 2657 Results.push_back(CallResult.second); 2658 break; 2659 } 2660 case ISD::ATOMIC_LOAD: { 2661 // There is no libcall for atomic load; fake it with ATOMIC_CMP_SWAP. 
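// Informal justification: a compare-and-swap of 0 with 0 either observes 0
// and stores 0 back, or fails and stores nothing; either way memory is
// unchanged and the current value is returned, which is exactly an atomic
// load.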
2662 SDValue Zero = DAG.getConstant(0, Node->getValueType(0)); 2663 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, 2664 cast<AtomicSDNode>(Node)->getMemoryVT(), 2665 Node->getOperand(0), 2666 Node->getOperand(1), Zero, Zero, 2667 cast<AtomicSDNode>(Node)->getMemOperand(), 2668 cast<AtomicSDNode>(Node)->getOrdering(), 2669 cast<AtomicSDNode>(Node)->getSynchScope()); 2670 Results.push_back(Swap.getValue(0)); 2671 Results.push_back(Swap.getValue(1)); 2672 break; 2673 } 2674 case ISD::ATOMIC_STORE: { 2675 // There is no libcall for atomic store; fake it with ATOMIC_SWAP. 2676 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, 2677 cast<AtomicSDNode>(Node)->getMemoryVT(), 2678 Node->getOperand(0), 2679 Node->getOperand(1), Node->getOperand(2), 2680 cast<AtomicSDNode>(Node)->getMemOperand(), 2681 cast<AtomicSDNode>(Node)->getOrdering(), 2682 cast<AtomicSDNode>(Node)->getSynchScope()); 2683 Results.push_back(Swap.getValue(1)); 2684 break; 2685 } 2686 // By default, atomic intrinsics are marked Legal and lowered. Targets 2687 // which don't support them directly, however, may want libcalls, in which 2688 // case they mark them Expand, and we get here. 2689 case ISD::ATOMIC_SWAP: 2690 case ISD::ATOMIC_LOAD_ADD: 2691 case ISD::ATOMIC_LOAD_SUB: 2692 case ISD::ATOMIC_LOAD_AND: 2693 case ISD::ATOMIC_LOAD_OR: 2694 case ISD::ATOMIC_LOAD_XOR: 2695 case ISD::ATOMIC_LOAD_NAND: 2696 case ISD::ATOMIC_LOAD_MIN: 2697 case ISD::ATOMIC_LOAD_MAX: 2698 case ISD::ATOMIC_LOAD_UMIN: 2699 case ISD::ATOMIC_LOAD_UMAX: 2700 case ISD::ATOMIC_CMP_SWAP: { 2701 std::pair<SDValue, SDValue> Tmp = ExpandAtomic(Node); 2702 Results.push_back(Tmp.first); 2703 Results.push_back(Tmp.second); 2704 break; 2705 } 2706 case ISD::DYNAMIC_STACKALLOC: 2707 ExpandDYNAMIC_STACKALLOC(Node, Results); 2708 break; 2709 case ISD::MERGE_VALUES: 2710 for (unsigned i = 0; i < Node->getNumValues(); i++) 2711 Results.push_back(Node->getOperand(i)); 2712 break; 2713 case ISD::UNDEF: { 2714 EVT VT = Node->getValueType(0); 2715 if (VT.isInteger()) 2716 Results.push_back(DAG.getConstant(0, VT)); 2717 else { 2718 assert(VT.isFloatingPoint() && "Unknown value type!"); 2719 Results.push_back(DAG.getConstantFP(0, VT)); 2720 } 2721 break; 2722 } 2723 case ISD::TRAP: { 2724 // If this operation is not supported, lower it to 'abort()' call 2725 TargetLowering::ArgListTy Args; 2726 TargetLowering:: 2727 CallLoweringInfo CLI(Node->getOperand(0), 2728 Type::getVoidTy(*DAG.getContext()), 2729 false, false, false, false, 0, CallingConv::C, 2730 /*isTailCall=*/false, 2731 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, 2732 DAG.getExternalSymbol("abort", TLI.getPointerTy()), 2733 Args, DAG, dl); 2734 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI); 2735 2736 Results.push_back(CallResult.second); 2737 break; 2738 } 2739 case ISD::FP_ROUND: 2740 case ISD::BITCAST: 2741 Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0), 2742 Node->getValueType(0), dl); 2743 Results.push_back(Tmp1); 2744 break; 2745 case ISD::FP_EXTEND: 2746 Tmp1 = EmitStackConvert(Node->getOperand(0), 2747 Node->getOperand(0).getValueType(), 2748 Node->getValueType(0), dl); 2749 Results.push_back(Tmp1); 2750 break; 2751 case ISD::SIGN_EXTEND_INREG: { 2752 // NOTE: we could fall back on load/store here too for targets without 2753 // SAR. However, it is doubtful that any exist. 
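// Illustration: sign-extending from the low i8 of an i32 gives BitsDiff = 24,
// so (x << 24) followed by an arithmetic shift right by 24 replicates bit 7
// of x into bits 8-31, implementing SIGN_EXTEND_INREG with just two shifts.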
2754 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 2755 EVT VT = Node->getValueType(0); 2756 EVT ShiftAmountTy = TLI.getShiftAmountTy(VT); 2757 if (VT.isVector()) 2758 ShiftAmountTy = VT; 2759 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 2760 ExtraVT.getScalarType().getSizeInBits(); 2761 SDValue ShiftCst = DAG.getConstant(BitsDiff, ShiftAmountTy); 2762 Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0), 2763 Node->getOperand(0), ShiftCst); 2764 Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst); 2765 Results.push_back(Tmp1); 2766 break; 2767 } 2768 case ISD::FP_ROUND_INREG: { 2769 // The only way we can lower this is to turn it into a TRUNCSTORE, 2770 // EXTLOAD pair, targeting a temporary location (a stack slot). 2771 2772 // NOTE: there is a choice here between constantly creating new stack 2773 // slots and always reusing the same one. We currently always create 2774 // new ones, as reuse may inhibit scheduling. 2775 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 2776 Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT, 2777 Node->getValueType(0), dl); 2778 Results.push_back(Tmp1); 2779 break; 2780 } 2781 case ISD::SINT_TO_FP: 2782 case ISD::UINT_TO_FP: 2783 Tmp1 = ExpandLegalINT_TO_FP(Node->getOpcode() == ISD::SINT_TO_FP, 2784 Node->getOperand(0), Node->getValueType(0), dl); 2785 Results.push_back(Tmp1); 2786 break; 2787 case ISD::FP_TO_UINT: { 2788 SDValue True, False; 2789 EVT VT = Node->getOperand(0).getValueType(); 2790 EVT NVT = Node->getValueType(0); 2791 APFloat apf(APInt::getNullValue(VT.getSizeInBits())); 2792 APInt x = APInt::getSignBit(NVT.getSizeInBits()); 2793 (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven); 2794 Tmp1 = DAG.getConstantFP(apf, VT); 2795 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), 2796 Node->getOperand(0), 2797 Tmp1, ISD::SETLT); 2798 True = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, Node->getOperand(0)); 2799 False = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, 2800 DAG.getNode(ISD::FSUB, dl, VT, 2801 Node->getOperand(0), Tmp1)); 2802 False = DAG.getNode(ISD::XOR, dl, NVT, False, 2803 DAG.getConstant(x, NVT)); 2804 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, True, False); 2805 Results.push_back(Tmp1); 2806 break; 2807 } 2808 case ISD::VAARG: { 2809 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2810 EVT VT = Node->getValueType(0); 2811 Tmp1 = Node->getOperand(0); 2812 Tmp2 = Node->getOperand(1); 2813 unsigned Align = Node->getConstantOperandVal(3); 2814 2815 SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, 2816 MachinePointerInfo(V), 2817 false, false, false, 0); 2818 SDValue VAList = VAListLoad; 2819 2820 if (Align > TLI.getMinStackArgumentAlignment()) { 2821 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); 2822 2823 VAList = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList, 2824 DAG.getConstant(Align - 1, 2825 TLI.getPointerTy())); 2826 2827 VAList = DAG.getNode(ISD::AND, dl, TLI.getPointerTy(), VAList, 2828 DAG.getConstant(-(int64_t)Align, 2829 TLI.getPointerTy())); 2830 } 2831 2832 // Increment the pointer, VAList, to the next vaarg 2833 Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList, 2834 DAG.getConstant(TLI.getDataLayout()-> 2835 getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())), 2836 TLI.getPointerTy())); 2837 // Store the incremented VAList to the legalized pointer 2838 Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2, 2839 MachinePointerInfo(V), false, false, 0); 
2840 // Load the actual argument out of the pointer VAList
2841 Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
2842 false, false, false, 0));
2843 Results.push_back(Results[0].getValue(1));
2844 break;
2845 }
2846 case ISD::VACOPY: {
2847 // This defaults to loading a pointer from the input and storing it to the
2848 // output, returning the chain.
2849 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
2850 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
2851 Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0),
2852 Node->getOperand(2), MachinePointerInfo(VS),
2853 false, false, false, 0);
2854 Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
2855 MachinePointerInfo(VD), false, false, 0);
2856 Results.push_back(Tmp1);
2857 break;
2858 }
2859 case ISD::EXTRACT_VECTOR_ELT:
2860 if (Node->getOperand(0).getValueType().getVectorNumElements() == 1)
2861 // This must be an access of the only element.  Return it.
2862 Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0),
2863 Node->getOperand(0));
2864 else
2865 Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0));
2866 Results.push_back(Tmp1);
2867 break;
2868 case ISD::EXTRACT_SUBVECTOR:
2869 Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0)));
2870 break;
2871 case ISD::INSERT_SUBVECTOR:
2872 Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0)));
2873 break;
2874 case ISD::CONCAT_VECTORS: {
2875 Results.push_back(ExpandVectorBuildThroughStack(Node));
2876 break;
2877 }
2878 case ISD::SCALAR_TO_VECTOR:
2879 Results.push_back(ExpandSCALAR_TO_VECTOR(Node));
2880 break;
2881 case ISD::INSERT_VECTOR_ELT:
2882 Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0),
2883 Node->getOperand(1),
2884 Node->getOperand(2), dl));
2885 break;
2886 case ISD::VECTOR_SHUFFLE: {
2887 SmallVector<int, 32> NewMask;
2888 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask();
2889
2890 EVT VT = Node->getValueType(0);
2891 EVT EltVT = VT.getVectorElementType();
2892 SDValue Op0 = Node->getOperand(0);
2893 SDValue Op1 = Node->getOperand(1);
2894 if (!TLI.isTypeLegal(EltVT)) {
2895
2896 EVT NewEltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT);
2897
2898 // BUILD_VECTOR operands are allowed to be wider than the element type.
2899 // But if NewEltVT is smaller than EltVT, the BUILD_VECTOR does not accept it.
2900 if (NewEltVT.bitsLT(EltVT)) {
2901
2902 // Convert shuffle node.
2903 // If original node was v4i64 and the new EltVT is i32,
2904 // cast operands to v8i32 and re-build the mask.
2905
2906 // Calculate the new VT; the size of the new VT should be equal to the original.
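// Example (hypothetical types): a v4i64 shuffle with mask <1,-1,3,0> and
// NewEltVT == i32 is rebuilt as a v8i32 shuffle with factor 2 and widened
// mask <2,3,-1,-1,6,7,0,1> by the loop below.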
2907 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltVT, 2908 VT.getSizeInBits()/NewEltVT.getSizeInBits()); 2909 assert(NewVT.bitsEq(VT)); 2910 2911 // cast operands to new VT 2912 Op0 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op0); 2913 Op1 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op1); 2914 2915 // Convert the shuffle mask 2916 unsigned int factor = NewVT.getVectorNumElements()/VT.getVectorNumElements(); 2917 2918 // EltVT gets smaller 2919 assert(factor > 0); 2920 2921 for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { 2922 if (Mask[i] < 0) { 2923 for (unsigned fi = 0; fi < factor; ++fi) 2924 NewMask.push_back(Mask[i]); 2925 } 2926 else { 2927 for (unsigned fi = 0; fi < factor; ++fi) 2928 NewMask.push_back(Mask[i]*factor+fi); 2929 } 2930 } 2931 Mask = NewMask; 2932 VT = NewVT; 2933 } 2934 EltVT = NewEltVT; 2935 } 2936 unsigned NumElems = VT.getVectorNumElements(); 2937 SmallVector<SDValue, 16> Ops; 2938 for (unsigned i = 0; i != NumElems; ++i) { 2939 if (Mask[i] < 0) { 2940 Ops.push_back(DAG.getUNDEF(EltVT)); 2941 continue; 2942 } 2943 unsigned Idx = Mask[i]; 2944 if (Idx < NumElems) 2945 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 2946 Op0, 2947 DAG.getIntPtrConstant(Idx))); 2948 else 2949 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 2950 Op1, 2951 DAG.getIntPtrConstant(Idx - NumElems))); 2952 } 2953 2954 Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size()); 2955 // We may have changed the BUILD_VECTOR type. Cast it back to the Node type. 2956 Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), Tmp1); 2957 Results.push_back(Tmp1); 2958 break; 2959 } 2960 case ISD::EXTRACT_ELEMENT: { 2961 EVT OpTy = Node->getOperand(0).getValueType(); 2962 if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) { 2963 // 1 -> Hi 2964 Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0), 2965 DAG.getConstant(OpTy.getSizeInBits()/2, 2966 TLI.getShiftAmountTy(Node->getOperand(0).getValueType()))); 2967 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1); 2968 } else { 2969 // 0 -> Lo 2970 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), 2971 Node->getOperand(0)); 2972 } 2973 Results.push_back(Tmp1); 2974 break; 2975 } 2976 case ISD::STACKSAVE: 2977 // Expand to CopyFromReg if the target set 2978 // StackPointerRegisterToSaveRestore. 2979 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) { 2980 Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP, 2981 Node->getValueType(0))); 2982 Results.push_back(Results[0].getValue(1)); 2983 } else { 2984 Results.push_back(DAG.getUNDEF(Node->getValueType(0))); 2985 Results.push_back(Node->getOperand(0)); 2986 } 2987 break; 2988 case ISD::STACKRESTORE: 2989 // Expand to CopyToReg if the target set 2990 // StackPointerRegisterToSaveRestore. 2991 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) { 2992 Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP, 2993 Node->getOperand(1))); 2994 } else { 2995 Results.push_back(Node->getOperand(0)); 2996 } 2997 break; 2998 case ISD::FCOPYSIGN: 2999 Results.push_back(ExpandFCOPYSIGN(Node)); 3000 break; 3001 case ISD::FNEG: 3002 // Expand Y = FNEG(X) -> Y = SUB -0.0, X 3003 Tmp1 = DAG.getConstantFP(-0.0, Node->getValueType(0)); 3004 Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1, 3005 Node->getOperand(0)); 3006 Results.push_back(Tmp1); 3007 break; 3008 case ISD::FABS: { 3009 // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X). 
3010 EVT VT = Node->getValueType(0); 3011 Tmp1 = Node->getOperand(0); 3012 Tmp2 = DAG.getConstantFP(0.0, VT); 3013 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(Tmp1.getValueType()), 3014 Tmp1, Tmp2, ISD::SETUGT); 3015 Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1); 3016 Tmp1 = DAG.getNode(ISD::SELECT, dl, VT, Tmp2, Tmp1, Tmp3); 3017 Results.push_back(Tmp1); 3018 break; 3019 } 3020 case ISD::FSQRT: 3021 Results.push_back(ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64, 3022 RTLIB::SQRT_F80, RTLIB::SQRT_PPCF128)); 3023 break; 3024 case ISD::FSIN: 3025 Results.push_back(ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64, 3026 RTLIB::SIN_F80, RTLIB::SIN_PPCF128)); 3027 break; 3028 case ISD::FCOS: 3029 Results.push_back(ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64, 3030 RTLIB::COS_F80, RTLIB::COS_PPCF128)); 3031 break; 3032 case ISD::FLOG: 3033 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64, 3034 RTLIB::LOG_F80, RTLIB::LOG_PPCF128)); 3035 break; 3036 case ISD::FLOG2: 3037 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64, 3038 RTLIB::LOG2_F80, RTLIB::LOG2_PPCF128)); 3039 break; 3040 case ISD::FLOG10: 3041 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64, 3042 RTLIB::LOG10_F80, RTLIB::LOG10_PPCF128)); 3043 break; 3044 case ISD::FEXP: 3045 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64, 3046 RTLIB::EXP_F80, RTLIB::EXP_PPCF128)); 3047 break; 3048 case ISD::FEXP2: 3049 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64, 3050 RTLIB::EXP2_F80, RTLIB::EXP2_PPCF128)); 3051 break; 3052 case ISD::FTRUNC: 3053 Results.push_back(ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64, 3054 RTLIB::TRUNC_F80, RTLIB::TRUNC_PPCF128)); 3055 break; 3056 case ISD::FFLOOR: 3057 Results.push_back(ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64, 3058 RTLIB::FLOOR_F80, RTLIB::FLOOR_PPCF128)); 3059 break; 3060 case ISD::FCEIL: 3061 Results.push_back(ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64, 3062 RTLIB::CEIL_F80, RTLIB::CEIL_PPCF128)); 3063 break; 3064 case ISD::FRINT: 3065 Results.push_back(ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64, 3066 RTLIB::RINT_F80, RTLIB::RINT_PPCF128)); 3067 break; 3068 case ISD::FNEARBYINT: 3069 Results.push_back(ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32, 3070 RTLIB::NEARBYINT_F64, 3071 RTLIB::NEARBYINT_F80, 3072 RTLIB::NEARBYINT_PPCF128)); 3073 break; 3074 case ISD::FPOWI: 3075 Results.push_back(ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64, 3076 RTLIB::POWI_F80, RTLIB::POWI_PPCF128)); 3077 break; 3078 case ISD::FPOW: 3079 Results.push_back(ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64, 3080 RTLIB::POW_F80, RTLIB::POW_PPCF128)); 3081 break; 3082 case ISD::FDIV: 3083 Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64, 3084 RTLIB::DIV_F80, RTLIB::DIV_PPCF128)); 3085 break; 3086 case ISD::FREM: 3087 Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64, 3088 RTLIB::REM_F80, RTLIB::REM_PPCF128)); 3089 break; 3090 case ISD::FMA: 3091 Results.push_back(ExpandFPLibCall(Node, RTLIB::FMA_F32, RTLIB::FMA_F64, 3092 RTLIB::FMA_F80, RTLIB::FMA_PPCF128)); 3093 break; 3094 case ISD::FP16_TO_FP32: 3095 Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false)); 3096 break; 3097 case ISD::FP32_TO_FP16: 3098 Results.push_back(ExpandLibCall(RTLIB::FPROUND_F32_F16, Node, false)); 3099 break; 3100 case ISD::ConstantFP: { 3101 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node); 
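    // Informal note on the fall-back below: when the immediate is not legal
    // for the target, ExpandConstantFP typically turns it into a
    // constant-pool entry plus a load (possibly storing the value in a
    // narrower FP type and using an extending load when that is exact).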
3102 // Check to see if this FP immediate is already legal. 3103 // If this is a legal constant, turn it into a TargetConstantFP node. 3104 if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0))) 3105 Results.push_back(ExpandConstantFP(CFP, true)); 3106 break; 3107 } 3108 case ISD::EHSELECTION: { 3109 unsigned Reg = TLI.getExceptionSelectorRegister(); 3110 assert(Reg && "Can't expand to unknown register!"); 3111 Results.push_back(DAG.getCopyFromReg(Node->getOperand(1), dl, Reg, 3112 Node->getValueType(0))); 3113 Results.push_back(Results[0].getValue(1)); 3114 break; 3115 } 3116 case ISD::EXCEPTIONADDR: { 3117 unsigned Reg = TLI.getExceptionPointerRegister(); 3118 assert(Reg && "Can't expand to unknown register!"); 3119 Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, Reg, 3120 Node->getValueType(0))); 3121 Results.push_back(Results[0].getValue(1)); 3122 break; 3123 } 3124 case ISD::FSUB: { 3125 EVT VT = Node->getValueType(0); 3126 assert(TLI.isOperationLegalOrCustom(ISD::FADD, VT) && 3127 TLI.isOperationLegalOrCustom(ISD::FNEG, VT) && 3128 "Don't know how to expand this FP subtraction!"); 3129 Tmp1 = DAG.getNode(ISD::FNEG, dl, VT, Node->getOperand(1)); 3130 Tmp1 = DAG.getNode(ISD::FADD, dl, VT, Node->getOperand(0), Tmp1); 3131 Results.push_back(Tmp1); 3132 break; 3133 } 3134 case ISD::SUB: { 3135 EVT VT = Node->getValueType(0); 3136 assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) && 3137 TLI.isOperationLegalOrCustom(ISD::XOR, VT) && 3138 "Don't know how to expand this subtraction!"); 3139 Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1), 3140 DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT)); 3141 Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, VT)); 3142 Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1)); 3143 break; 3144 } 3145 case ISD::UREM: 3146 case ISD::SREM: { 3147 EVT VT = Node->getValueType(0); 3148 SDVTList VTs = DAG.getVTList(VT, VT); 3149 bool isSigned = Node->getOpcode() == ISD::SREM; 3150 unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV; 3151 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM; 3152 Tmp2 = Node->getOperand(0); 3153 Tmp3 = Node->getOperand(1); 3154 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) || 3155 (isDivRemLibcallAvailable(Node, isSigned, TLI) && 3156 // If div is legal, it's better to do the normal expansion 3157 !TLI.isOperationLegalOrCustom(DivOpc, Node->getValueType(0)) && 3158 useDivRem(Node, isSigned, false))) { 3159 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1); 3160 } else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) { 3161 // X % Y -> X-X/Y*Y 3162 Tmp1 = DAG.getNode(DivOpc, dl, VT, Tmp2, Tmp3); 3163 Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Tmp3); 3164 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp1); 3165 } else if (isSigned) 3166 Tmp1 = ExpandIntLibCall(Node, true, 3167 RTLIB::SREM_I8, 3168 RTLIB::SREM_I16, RTLIB::SREM_I32, 3169 RTLIB::SREM_I64, RTLIB::SREM_I128); 3170 else 3171 Tmp1 = ExpandIntLibCall(Node, false, 3172 RTLIB::UREM_I8, 3173 RTLIB::UREM_I16, RTLIB::UREM_I32, 3174 RTLIB::UREM_I64, RTLIB::UREM_I128); 3175 Results.push_back(Tmp1); 3176 break; 3177 } 3178 case ISD::UDIV: 3179 case ISD::SDIV: { 3180 bool isSigned = Node->getOpcode() == ISD::SDIV; 3181 unsigned DivRemOpc = isSigned ? 
ISD::SDIVREM : ISD::UDIVREM; 3182 EVT VT = Node->getValueType(0); 3183 SDVTList VTs = DAG.getVTList(VT, VT); 3184 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) || 3185 (isDivRemLibcallAvailable(Node, isSigned, TLI) && 3186 useDivRem(Node, isSigned, true))) 3187 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0), 3188 Node->getOperand(1)); 3189 else if (isSigned) 3190 Tmp1 = ExpandIntLibCall(Node, true, 3191 RTLIB::SDIV_I8, 3192 RTLIB::SDIV_I16, RTLIB::SDIV_I32, 3193 RTLIB::SDIV_I64, RTLIB::SDIV_I128); 3194 else 3195 Tmp1 = ExpandIntLibCall(Node, false, 3196 RTLIB::UDIV_I8, 3197 RTLIB::UDIV_I16, RTLIB::UDIV_I32, 3198 RTLIB::UDIV_I64, RTLIB::UDIV_I128); 3199 Results.push_back(Tmp1); 3200 break; 3201 } 3202 case ISD::MULHU: 3203 case ISD::MULHS: { 3204 unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI : 3205 ISD::SMUL_LOHI; 3206 EVT VT = Node->getValueType(0); 3207 SDVTList VTs = DAG.getVTList(VT, VT); 3208 assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) && 3209 "If this wasn't legal, it shouldn't have been created!"); 3210 Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0), 3211 Node->getOperand(1)); 3212 Results.push_back(Tmp1.getValue(1)); 3213 break; 3214 } 3215 case ISD::SDIVREM: 3216 case ISD::UDIVREM: 3217 // Expand into divrem libcall 3218 ExpandDivRemLibCall(Node, Results); 3219 break; 3220 case ISD::MUL: { 3221 EVT VT = Node->getValueType(0); 3222 SDVTList VTs = DAG.getVTList(VT, VT); 3223 // See if multiply or divide can be lowered using two-result operations. 3224 // We just need the low half of the multiply; try both the signed 3225 // and unsigned forms. If the target supports both SMUL_LOHI and 3226 // UMUL_LOHI, form a preference by checking which forms of plain 3227 // MULH it supports. 3228 bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT); 3229 bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT); 3230 bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT); 3231 bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT); 3232 unsigned OpToUse = 0; 3233 if (HasSMUL_LOHI && !HasMULHS) { 3234 OpToUse = ISD::SMUL_LOHI; 3235 } else if (HasUMUL_LOHI && !HasMULHU) { 3236 OpToUse = ISD::UMUL_LOHI; 3237 } else if (HasSMUL_LOHI) { 3238 OpToUse = ISD::SMUL_LOHI; 3239 } else if (HasUMUL_LOHI) { 3240 OpToUse = ISD::UMUL_LOHI; 3241 } 3242 if (OpToUse) { 3243 Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0), 3244 Node->getOperand(1))); 3245 break; 3246 } 3247 Tmp1 = ExpandIntLibCall(Node, false, 3248 RTLIB::MUL_I8, 3249 RTLIB::MUL_I16, RTLIB::MUL_I32, 3250 RTLIB::MUL_I64, RTLIB::MUL_I128); 3251 Results.push_back(Tmp1); 3252 break; 3253 } 3254 case ISD::SADDO: 3255 case ISD::SSUBO: { 3256 SDValue LHS = Node->getOperand(0); 3257 SDValue RHS = Node->getOperand(1); 3258 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ? 
3259 ISD::ADD : ISD::SUB, dl, LHS.getValueType(), 3260 LHS, RHS); 3261 Results.push_back(Sum); 3262 EVT OType = Node->getValueType(1); 3263 3264 SDValue Zero = DAG.getConstant(0, LHS.getValueType()); 3265 3266 // LHSSign -> LHS >= 0 3267 // RHSSign -> RHS >= 0 3268 // SumSign -> Sum >= 0 3269 // 3270 // Add: 3271 // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign) 3272 // Sub: 3273 // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign) 3274 // 3275 SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE); 3276 SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE); 3277 SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign, 3278 Node->getOpcode() == ISD::SADDO ? 3279 ISD::SETEQ : ISD::SETNE); 3280 3281 SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE); 3282 SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE); 3283 3284 SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE); 3285 Results.push_back(Cmp); 3286 break; 3287 } 3288 case ISD::UADDO: 3289 case ISD::USUBO: { 3290 SDValue LHS = Node->getOperand(0); 3291 SDValue RHS = Node->getOperand(1); 3292 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::UADDO ? 3293 ISD::ADD : ISD::SUB, dl, LHS.getValueType(), 3294 LHS, RHS); 3295 Results.push_back(Sum); 3296 Results.push_back(DAG.getSetCC(dl, Node->getValueType(1), Sum, LHS, 3297 Node->getOpcode () == ISD::UADDO ? 3298 ISD::SETULT : ISD::SETUGT)); 3299 break; 3300 } 3301 case ISD::UMULO: 3302 case ISD::SMULO: { 3303 EVT VT = Node->getValueType(0); 3304 EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2); 3305 SDValue LHS = Node->getOperand(0); 3306 SDValue RHS = Node->getOperand(1); 3307 SDValue BottomHalf; 3308 SDValue TopHalf; 3309 static const unsigned Ops[2][3] = 3310 { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND }, 3311 { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }}; 3312 bool isSigned = Node->getOpcode() == ISD::SMULO; 3313 if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) { 3314 BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 3315 TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS); 3316 } else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) { 3317 BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS, 3318 RHS); 3319 TopHalf = BottomHalf.getValue(1); 3320 } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(), 3321 VT.getSizeInBits() * 2))) { 3322 LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS); 3323 RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS); 3324 Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS); 3325 BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1, 3326 DAG.getIntPtrConstant(0)); 3327 TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1, 3328 DAG.getIntPtrConstant(1)); 3329 } else { 3330 // We can fall back to a libcall with an illegal type for the MUL if we 3331 // have a libcall big enough. 3332 // Also, we can fall back to a division in some cases, but that's a big 3333 // performance hit in the general case. 3334 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 3335 if (WideVT == MVT::i16) 3336 LC = RTLIB::MUL_I16; 3337 else if (WideVT == MVT::i32) 3338 LC = RTLIB::MUL_I32; 3339 else if (WideVT == MVT::i64) 3340 LC = RTLIB::MUL_I64; 3341 else if (WideVT == MVT::i128) 3342 LC = RTLIB::MUL_I128; 3343 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!"); 3344 3345 // The high part is obtained by SRA'ing all but one of the bits of low 3346 // part. 
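      // For example, with VT = i32 the shift below is an arithmetic shift
      // right by 31, which yields 0 when the operand's sign bit is clear
      // and all-ones when it is set, i.e. the word that sign extension to
      // the (illegal) 64-bit WideVT would place in the high half.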
3347 unsigned LoSize = VT.getSizeInBits(); 3348 SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, RHS, 3349 DAG.getConstant(LoSize-1, TLI.getPointerTy())); 3350 SDValue HiRHS = DAG.getNode(ISD::SRA, dl, VT, LHS, 3351 DAG.getConstant(LoSize-1, TLI.getPointerTy())); 3352 3353 // Here we're passing the 2 arguments explicitly as 4 arguments that are 3354 // pre-lowered to the correct types. This all depends upon WideVT not 3355 // being a legal type for the architecture and thus has to be split to 3356 // two arguments. 3357 SDValue Args[] = { LHS, HiLHS, RHS, HiRHS }; 3358 SDValue Ret = ExpandLibCall(LC, WideVT, Args, 4, isSigned, dl); 3359 BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret, 3360 DAG.getIntPtrConstant(0)); 3361 TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret, 3362 DAG.getIntPtrConstant(1)); 3363 // Ret is a node with an illegal type. Because such things are not 3364 // generally permitted during this phase of legalization, delete the 3365 // node. The above EXTRACT_ELEMENT nodes should have been folded. 3366 DAG.DeleteNode(Ret.getNode()); 3367 } 3368 3369 if (isSigned) { 3370 Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, 3371 TLI.getShiftAmountTy(BottomHalf.getValueType())); 3372 Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1); 3373 TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1, 3374 ISD::SETNE); 3375 } else { 3376 TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, 3377 DAG.getConstant(0, VT), ISD::SETNE); 3378 } 3379 Results.push_back(BottomHalf); 3380 Results.push_back(TopHalf); 3381 break; 3382 } 3383 case ISD::BUILD_PAIR: { 3384 EVT PairTy = Node->getValueType(0); 3385 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0)); 3386 Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1)); 3387 Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2, 3388 DAG.getConstant(PairTy.getSizeInBits()/2, 3389 TLI.getShiftAmountTy(PairTy))); 3390 Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2)); 3391 break; 3392 } 3393 case ISD::SELECT: 3394 Tmp1 = Node->getOperand(0); 3395 Tmp2 = Node->getOperand(1); 3396 Tmp3 = Node->getOperand(2); 3397 if (Tmp1.getOpcode() == ISD::SETCC) { 3398 Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1), 3399 Tmp2, Tmp3, 3400 cast<CondCodeSDNode>(Tmp1.getOperand(2))->get()); 3401 } else { 3402 Tmp1 = DAG.getSelectCC(dl, Tmp1, 3403 DAG.getConstant(0, Tmp1.getValueType()), 3404 Tmp2, Tmp3, ISD::SETNE); 3405 } 3406 Results.push_back(Tmp1); 3407 break; 3408 case ISD::BR_JT: { 3409 SDValue Chain = Node->getOperand(0); 3410 SDValue Table = Node->getOperand(1); 3411 SDValue Index = Node->getOperand(2); 3412 3413 EVT PTy = TLI.getPointerTy(); 3414 3415 const DataLayout &TD = *TLI.getDataLayout(); 3416 unsigned EntrySize = 3417 DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD); 3418 3419 Index = DAG.getNode(ISD::MUL, dl, PTy, 3420 Index, DAG.getConstant(EntrySize, PTy)); 3421 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 3422 3423 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8); 3424 SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr, 3425 MachinePointerInfo::getJumpTable(), MemVT, 3426 false, false, 0); 3427 Addr = LD; 3428 if (TM.getRelocationModel() == Reloc::PIC_) { 3429 // For PIC, the sequence is: 3430 // BRIND(load(Jumptable + index) + RelocBase) 3431 // RelocBase can be JumpTable, GOT or some sort of global base. 
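      // Illustrative walk-through assuming 4-byte jump-table entries: Addr
      // computed above is Table + Index*4, the SEXTLOAD reads the 32-bit
      // entry, and under PIC that entry is an offset which is added to the
      // reloc base before the BRIND emitted below.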
3432 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, 3433 TLI.getPICJumpTableRelocBase(Table, DAG)); 3434 } 3435 Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr); 3436 Results.push_back(Tmp1); 3437 break; 3438 } 3439 case ISD::BRCOND: 3440 // Expand brcond's setcc into its constituent parts and create a BR_CC 3441 // Node. 3442 Tmp1 = Node->getOperand(0); 3443 Tmp2 = Node->getOperand(1); 3444 if (Tmp2.getOpcode() == ISD::SETCC) { 3445 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, 3446 Tmp1, Tmp2.getOperand(2), 3447 Tmp2.getOperand(0), Tmp2.getOperand(1), 3448 Node->getOperand(2)); 3449 } else { 3450 // We test only the i1 bit. Skip the AND if UNDEF. 3451 Tmp3 = (Tmp2.getOpcode() == ISD::UNDEF) ? Tmp2 : 3452 DAG.getNode(ISD::AND, dl, Tmp2.getValueType(), Tmp2, 3453 DAG.getConstant(1, Tmp2.getValueType())); 3454 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1, 3455 DAG.getCondCode(ISD::SETNE), Tmp3, 3456 DAG.getConstant(0, Tmp3.getValueType()), 3457 Node->getOperand(2)); 3458 } 3459 Results.push_back(Tmp1); 3460 break; 3461 case ISD::SETCC: { 3462 Tmp1 = Node->getOperand(0); 3463 Tmp2 = Node->getOperand(1); 3464 Tmp3 = Node->getOperand(2); 3465 LegalizeSetCCCondCode(Node->getValueType(0), Tmp1, Tmp2, Tmp3, dl); 3466 3467 // If we expanded the SETCC into an AND/OR, return the new node 3468 if (Tmp2.getNode() == 0) { 3469 Results.push_back(Tmp1); 3470 break; 3471 } 3472 3473 // Otherwise, SETCC for the given comparison type must be completely 3474 // illegal; expand it into a SELECT_CC. 3475 EVT VT = Node->getValueType(0); 3476 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2, 3477 DAG.getConstant(1, VT), DAG.getConstant(0, VT), Tmp3); 3478 Results.push_back(Tmp1); 3479 break; 3480 } 3481 case ISD::SELECT_CC: { 3482 Tmp1 = Node->getOperand(0); // LHS 3483 Tmp2 = Node->getOperand(1); // RHS 3484 Tmp3 = Node->getOperand(2); // True 3485 Tmp4 = Node->getOperand(3); // False 3486 SDValue CC = Node->getOperand(4); 3487 3488 LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp1.getValueType()), 3489 Tmp1, Tmp2, CC, dl); 3490 3491 assert(!Tmp2.getNode() && "Can't legalize SELECT_CC with legal condition!"); 3492 Tmp2 = DAG.getConstant(0, Tmp1.getValueType()); 3493 CC = DAG.getCondCode(ISD::SETNE); 3494 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, Tmp2, 3495 Tmp3, Tmp4, CC); 3496 Results.push_back(Tmp1); 3497 break; 3498 } 3499 case ISD::BR_CC: { 3500 Tmp1 = Node->getOperand(0); // Chain 3501 Tmp2 = Node->getOperand(2); // LHS 3502 Tmp3 = Node->getOperand(3); // RHS 3503 Tmp4 = Node->getOperand(1); // CC 3504 3505 LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp2.getValueType()), 3506 Tmp2, Tmp3, Tmp4, dl); 3507 3508 assert(!Tmp3.getNode() && "Can't legalize BR_CC with legal condition!"); 3509 Tmp3 = DAG.getConstant(0, Tmp2.getValueType()); 3510 Tmp4 = DAG.getCondCode(ISD::SETNE); 3511 Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4, Tmp2, 3512 Tmp3, Node->getOperand(4)); 3513 Results.push_back(Tmp1); 3514 break; 3515 } 3516 case ISD::BUILD_VECTOR: 3517 Results.push_back(ExpandBUILD_VECTOR(Node)); 3518 break; 3519 case ISD::SRA: 3520 case ISD::SRL: 3521 case ISD::SHL: { 3522 // Scalarize vector SRA/SRL/SHL. 
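    // Sketch of the loop below for a v4i32 shift: extract element i of both
    // the value vector and the shift-amount vector, emit four scalar shifts
    // of the element type, and re-pack the results with a single
    // BUILD_VECTOR of the original vector type.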
3523 EVT VT = Node->getValueType(0); 3524 assert(VT.isVector() && "Unable to legalize non-vector shift"); 3525 assert(TLI.isTypeLegal(VT.getScalarType())&& "Element type must be legal"); 3526 unsigned NumElem = VT.getVectorNumElements(); 3527 3528 SmallVector<SDValue, 8> Scalars; 3529 for (unsigned Idx = 0; Idx < NumElem; Idx++) { 3530 SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 3531 VT.getScalarType(), 3532 Node->getOperand(0), DAG.getIntPtrConstant(Idx)); 3533 SDValue Sh = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 3534 VT.getScalarType(), 3535 Node->getOperand(1), DAG.getIntPtrConstant(Idx)); 3536 Scalars.push_back(DAG.getNode(Node->getOpcode(), dl, 3537 VT.getScalarType(), Ex, Sh)); 3538 } 3539 SDValue Result = 3540 DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), 3541 &Scalars[0], Scalars.size()); 3542 ReplaceNode(SDValue(Node, 0), Result); 3543 break; 3544 } 3545 case ISD::GLOBAL_OFFSET_TABLE: 3546 case ISD::GlobalAddress: 3547 case ISD::GlobalTLSAddress: 3548 case ISD::ExternalSymbol: 3549 case ISD::ConstantPool: 3550 case ISD::JumpTable: 3551 case ISD::INTRINSIC_W_CHAIN: 3552 case ISD::INTRINSIC_WO_CHAIN: 3553 case ISD::INTRINSIC_VOID: 3554 // FIXME: Custom lowering for these operations shouldn't return null! 3555 break; 3556 } 3557 3558 // Replace the original node with the legalized result. 3559 if (!Results.empty()) 3560 ReplaceNode(Node, Results.data()); 3561} 3562 3563void SelectionDAGLegalize::PromoteNode(SDNode *Node) { 3564 SmallVector<SDValue, 8> Results; 3565 EVT OVT = Node->getValueType(0); 3566 if (Node->getOpcode() == ISD::UINT_TO_FP || 3567 Node->getOpcode() == ISD::SINT_TO_FP || 3568 Node->getOpcode() == ISD::SETCC) { 3569 OVT = Node->getOperand(0).getValueType(); 3570 } 3571 EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT); 3572 DebugLoc dl = Node->getDebugLoc(); 3573 SDValue Tmp1, Tmp2, Tmp3; 3574 switch (Node->getOpcode()) { 3575 case ISD::CTTZ: 3576 case ISD::CTTZ_ZERO_UNDEF: 3577 case ISD::CTLZ: 3578 case ISD::CTLZ_ZERO_UNDEF: 3579 case ISD::CTPOP: 3580 // Zero extend the argument. 3581 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0)); 3582 // Perform the larger operation. For CTPOP and CTTZ_ZERO_UNDEF, this is 3583 // already the correct result. 3584 Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1); 3585 if (Node->getOpcode() == ISD::CTTZ) { 3586 // FIXME: This should set a bit in the zero extended value instead. 
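      // Worked example of the fixup below, assuming an i8 cttz promoted to
      // i32: a zero input zero-extends to 0, the i32 CTTZ then yields 32,
      // so we compare against 32 and select 8 (the original bit width) to
      // preserve the i8 semantics.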
3587 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT), 3588 Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT), 3589 ISD::SETEQ); 3590 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, 3591 DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1); 3592 } else if (Node->getOpcode() == ISD::CTLZ || 3593 Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF) { 3594 // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT)) 3595 Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1, 3596 DAG.getConstant(NVT.getSizeInBits() - 3597 OVT.getSizeInBits(), NVT)); 3598 } 3599 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1)); 3600 break; 3601 case ISD::BSWAP: { 3602 unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits(); 3603 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0)); 3604 Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1); 3605 Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1, 3606 DAG.getConstant(DiffBits, TLI.getShiftAmountTy(NVT))); 3607 Results.push_back(Tmp1); 3608 break; 3609 } 3610 case ISD::FP_TO_UINT: 3611 case ISD::FP_TO_SINT: 3612 Tmp1 = PromoteLegalFP_TO_INT(Node->getOperand(0), Node->getValueType(0), 3613 Node->getOpcode() == ISD::FP_TO_SINT, dl); 3614 Results.push_back(Tmp1); 3615 break; 3616 case ISD::UINT_TO_FP: 3617 case ISD::SINT_TO_FP: 3618 Tmp1 = PromoteLegalINT_TO_FP(Node->getOperand(0), Node->getValueType(0), 3619 Node->getOpcode() == ISD::SINT_TO_FP, dl); 3620 Results.push_back(Tmp1); 3621 break; 3622 case ISD::VAARG: { 3623 SDValue Chain = Node->getOperand(0); // Get the chain. 3624 SDValue Ptr = Node->getOperand(1); // Get the pointer. 3625 3626 unsigned TruncOp; 3627 if (OVT.isVector()) { 3628 TruncOp = ISD::BITCAST; 3629 } else { 3630 assert(OVT.isInteger() 3631 && "VAARG promotion is supported only for vectors or integer types"); 3632 TruncOp = ISD::TRUNCATE; 3633 } 3634 3635 // Perform the larger operation, then convert back 3636 Tmp1 = DAG.getVAArg(NVT, dl, Chain, Ptr, Node->getOperand(2), 3637 Node->getConstantOperandVal(3)); 3638 Chain = Tmp1.getValue(1); 3639 3640 Tmp2 = DAG.getNode(TruncOp, dl, OVT, Tmp1); 3641 3642 // Modified the chain result - switch anything that used the old chain to 3643 // use the new one. 3644 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp2); 3645 DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Chain); 3646 ReplacedNode(Node); 3647 break; 3648 } 3649 case ISD::AND: 3650 case ISD::OR: 3651 case ISD::XOR: { 3652 unsigned ExtOp, TruncOp; 3653 if (OVT.isVector()) { 3654 ExtOp = ISD::BITCAST; 3655 TruncOp = ISD::BITCAST; 3656 } else { 3657 assert(OVT.isInteger() && "Cannot promote logic operation"); 3658 ExtOp = ISD::ANY_EXTEND; 3659 TruncOp = ISD::TRUNCATE; 3660 } 3661 // Promote each of the values to the new type. 3662 Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0)); 3663 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1)); 3664 // Perform the larger operation, then convert back 3665 Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2); 3666 Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1)); 3667 break; 3668 } 3669 case ISD::SELECT: { 3670 unsigned ExtOp, TruncOp; 3671 if (Node->getValueType(0).isVector()) { 3672 ExtOp = ISD::BITCAST; 3673 TruncOp = ISD::BITCAST; 3674 } else if (Node->getValueType(0).isInteger()) { 3675 ExtOp = ISD::ANY_EXTEND; 3676 TruncOp = ISD::TRUNCATE; 3677 } else { 3678 ExtOp = ISD::FP_EXTEND; 3679 TruncOp = ISD::FP_ROUND; 3680 } 3681 Tmp1 = Node->getOperand(0); 3682 // Promote each of the values to the new type. 
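    // For instance, if an f32 select were promoted to f64, both value
    // operands would be FP_EXTENDed, the select performed in f64, and the
    // FP_ROUND below would narrow the result back to f32; the condition
    // operand is used unchanged.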
3683 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1)); 3684 Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2)); 3685 // Perform the larger operation, then round down. 3686 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp1, Tmp2, Tmp3); 3687 if (TruncOp != ISD::FP_ROUND) 3688 Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1); 3689 else 3690 Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1, 3691 DAG.getIntPtrConstant(0)); 3692 Results.push_back(Tmp1); 3693 break; 3694 } 3695 case ISD::VECTOR_SHUFFLE: { 3696 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask(); 3697 3698 // Cast the two input vectors. 3699 Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0)); 3700 Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1)); 3701 3702 // Convert the shuffle mask to the right # elements. 3703 Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask); 3704 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1); 3705 Results.push_back(Tmp1); 3706 break; 3707 } 3708 case ISD::SETCC: { 3709 unsigned ExtOp = ISD::FP_EXTEND; 3710 if (NVT.isInteger()) { 3711 ISD::CondCode CCCode = 3712 cast<CondCodeSDNode>(Node->getOperand(2))->get(); 3713 ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 3714 } 3715 Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0)); 3716 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1)); 3717 Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0), 3718 Tmp1, Tmp2, Node->getOperand(2))); 3719 break; 3720 } 3721 case ISD::FDIV: 3722 case ISD::FREM: 3723 case ISD::FPOW: { 3724 Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0)); 3725 Tmp2 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(1)); 3726 Tmp3 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2); 3727 Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT, 3728 Tmp3, DAG.getIntPtrConstant(0))); 3729 break; 3730 } 3731 case ISD::FLOG2: 3732 case ISD::FEXP2: 3733 case ISD::FLOG: 3734 case ISD::FEXP: { 3735 Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0)); 3736 Tmp2 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1); 3737 Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT, 3738 Tmp2, DAG.getIntPtrConstant(0))); 3739 break; 3740 } 3741 } 3742 3743 // Replace the original node with the legalized result. 3744 if (!Results.empty()) 3745 ReplaceNode(Node, Results.data()); 3746} 3747 3748// SelectionDAG::Legalize - This is the entry point for the file. 3749// 3750void SelectionDAG::Legalize() { 3751 /// run - This is the main entry point to this class. 3752 /// 3753 SelectionDAGLegalize(*this).LegalizeDAG(); 3754} 3755
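// Informal usage note: in the SelectionDAGISel pipeline this entry point is
// reached as CurDAG->Legalize(), after type (and, if needed, vector)
// legalization and before the post-legalization DAG combine and instruction
// selection.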