LegalizeDAG.cpp revision 65fd6564b8aedd053845c81ede1ac594acb470e4
//===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::Legalize method.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;

//===----------------------------------------------------------------------===//
/// SelectionDAGLegalize - This takes an arbitrary SelectionDAG as input and
/// hacks on it until the target machine can handle it. This involves
/// eliminating value sizes the machine cannot handle (promoting small sizes to
/// large sizes or splitting up large values into small values) as well as
/// eliminating operations the machine cannot handle.
///
/// This code also does a small amount of optimization and recognition of idioms
/// as part of its processing. For example, if a target does not support a
/// 'setcc' instruction efficiently, but does support the 'brcc' instruction,
/// this will attempt to merge the setcc and brc instructions into brcc's.
///
namespace {
class SelectionDAGLegalize : public SelectionDAG::DAGUpdateListener {
  const TargetMachine &TM;
  const TargetLowering &TLI;
  SelectionDAG &DAG;

  /// LegalizePosition - The iterator for walking through the node list.
  SelectionDAG::allnodes_iterator LegalizePosition;

  /// LegalizedNodes - The set of nodes which have already been legalized.
  SmallPtrSet<SDNode *, 16> LegalizedNodes;

  // Libcall insertion helpers.

public:
  explicit SelectionDAGLegalize(SelectionDAG &DAG);

  void LegalizeDAG();

private:
  /// LegalizeOp - Legalizes the given operation.
  void LegalizeOp(SDNode *Node);

  SDValue OptimizeFloatStore(StoreSDNode *ST);

  /// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
  /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
  /// is necessary to spill the vector being inserted into to memory, perform
  /// the insert there, and then read the result back.
  SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val,
                                         SDValue Idx, DebugLoc dl);
  SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
                                  SDValue Idx, DebugLoc dl);

  /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
  /// performs the same shuffle in terms of order or result bytes, but on a
  /// type whose vector element type is narrower than the original shuffle
  /// type. e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
  SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
                                     SDValue N1, SDValue N2,
                                     SmallVectorImpl<int> &Mask) const;

  void LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
                             DebugLoc dl);

  SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
  SDValue ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, const SDValue *Ops,
                        unsigned NumOps, bool isSigned, DebugLoc dl);

  std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
                                                 SDNode *Node, bool isSigned);
  SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
                          RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
                          RTLIB::Libcall Call_PPCF128);
  SDValue ExpandIntLibCall(SDNode *Node, bool isSigned,
                           RTLIB::Libcall Call_I8,
                           RTLIB::Libcall Call_I16,
                           RTLIB::Libcall Call_I32,
                           RTLIB::Libcall Call_I64,
                           RTLIB::Libcall Call_I128);
  void ExpandDivRemLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, DebugLoc dl);
  SDValue ExpandBUILD_VECTOR(SDNode *Node);
  SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
  void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
                                SmallVectorImpl<SDValue> &Results);
  SDValue ExpandFCOPYSIGN(SDNode *Node);
  SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT,
                               DebugLoc dl);
  SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);
  SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);

  SDValue ExpandBSWAP(SDValue Op, DebugLoc dl);
  SDValue ExpandBitCount(unsigned Opc, SDValue Op, DebugLoc dl);

  SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
  SDValue ExpandInsertToVectorThroughStack(SDValue Op);
  SDValue ExpandVectorBuildThroughStack(SDNode* Node);

  SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP);

  std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);

  void ExpandNode(SDNode *Node);
  void PromoteNode(SDNode *Node);

  // DAGUpdateListener implementation.
  virtual void NodeDeleted(SDNode *N, SDNode *E) {
    LegalizedNodes.erase(N);
    if (LegalizePosition == SelectionDAG::allnodes_iterator(N))
      ++LegalizePosition;
  }

  virtual void NodeUpdated(SDNode *N) {}
};
}

/// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
/// performs the same shuffle in terms of order or result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
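/// (Illustrative note: each original mask index i is widened to the
/// NumEltsGrowth consecutive indices i*NumEltsGrowth .. i*NumEltsGrowth +
/// NumEltsGrowth-1, and undef (-1) entries stay -1, as the loop below shows.)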
SDValue
SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
                                                 SDValue N1, SDValue N2,
                                             SmallVectorImpl<int> &Mask) const {
  unsigned NumMaskElts = VT.getVectorNumElements();
  unsigned NumDestElts = NVT.getVectorNumElements();
  unsigned NumEltsGrowth = NumDestElts / NumMaskElts;

  assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!");

  if (NumEltsGrowth == 1)
    return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]);

  SmallVector<int, 8> NewMask;
  for (unsigned i = 0; i != NumMaskElts; ++i) {
    int Idx = Mask[i];
    for (unsigned j = 0; j != NumEltsGrowth; ++j) {
      if (Idx < 0)
        NewMask.push_back(-1);
      else
        NewMask.push_back(Idx * NumEltsGrowth + j);
    }
  }
  assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?");
  assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?");
  return DAG.getVectorShuffle(NVT, dl, N1, N2, &NewMask[0]);
}

SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag)
  : TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()),
    DAG(dag) {
}

void SelectionDAGLegalize::LegalizeDAG() {
  DAG.AssignTopologicalOrder();

  // Visit all the nodes. We start in topological order, so that we see
  // nodes with their original operands intact. Legalization can produce
  // new nodes which may themselves need to be legalized. Iterate until all
  // nodes have been legalized.
  for (;;) {
    bool AnyLegalized = false;
    for (LegalizePosition = DAG.allnodes_end();
         LegalizePosition != DAG.allnodes_begin(); ) {
      --LegalizePosition;

      SDNode *N = LegalizePosition;
      if (LegalizedNodes.insert(N)) {
        AnyLegalized = true;
        LegalizeOp(N);
      }
    }
    if (!AnyLegalized)
      break;

  }

  // Remove dead nodes now.
  DAG.RemoveDeadNodes();
}

/// ExpandConstantFP - Expands the ConstantFP node to an integer constant or
/// a load from the constant pool.
SDValue
SelectionDAGLegalize::ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP) {
  bool Extend = false;
  DebugLoc dl = CFP->getDebugLoc();

  // If a FP immediate is precise when represented as a float and if the
  // target can do an extending load from float to double, we put it into
  // the constant pool as a float, even if it is statically typed as a
  // double. This shrinks FP constants and canonicalizes them for targets where
  // an FP extending load is the same cost as a normal load (such as on the x87
  // fp stack or PPC FP unit).
  EVT VT = CFP->getValueType(0);
  ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue());
  if (!UseCP) {
    assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion");
    return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(),
                           (VT == MVT::f64) ? MVT::i64 : MVT::i32);
  }

  EVT OrigVT = VT;
  EVT SVT = VT;
  while (SVT != MVT::f32) {
    SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
    if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) &&
        // Only do this if the target has a native EXTLOAD instruction from
        // smaller type.
        TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
        TLI.ShouldShrinkFPConstant(OrigVT)) {
      Type *SType = SVT.getTypeForEVT(*DAG.getContext());
      LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
      VT = SVT;
      Extend = true;
    }
  }

  SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy());
  unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
  if (Extend) {
    SDValue Result =
      DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT,
                     DAG.getEntryNode(),
                     CPIdx, MachinePointerInfo::getConstantPool(),
                     VT, false, false, Alignment);
    return Result;
  }
  SDValue Result =
    DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx,
                MachinePointerInfo::getConstantPool(), false, false,
                Alignment);
  return Result;
}

/// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores.
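/// For example (a target-independent sketch): an i32 store with alignment 2
/// can become two i16 truncating stores of Val and (Val >> 16), the second at
/// Ptr+2, with the two halves swapped on big-endian targets.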
static void ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
                                 const TargetLowering &TLI,
                                 SelectionDAG::DAGUpdateListener *DUL) {
  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  SDValue Val = ST->getValue();
  EVT VT = Val.getValueType();
  int Alignment = ST->getAlignment();
  DebugLoc dl = ST->getDebugLoc();
  if (ST->getMemoryVT().isFloatingPoint() ||
      ST->getMemoryVT().isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (TLI.isTypeLegal(intVT)) {
      // Expand to a bitconvert of the value to the integer type of the
      // same size, then a (misaligned) int store.
      // FIXME: Does not handle truncating floating point stores!
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
      Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
                            ST->isVolatile(), ST->isNonTemporal(), Alignment);
      DAG.ReplaceAllUsesWith(SDValue(ST, 0), Result, DUL);
      return;
    }
    // Do an (aligned) store to a stack slot, then copy from the stack slot
    // to the final destination using (unaligned) integer loads and stores.
    EVT StoredVT = ST->getMemoryVT();
    EVT RegVT =
      TLI.getRegisterType(*DAG.getContext(),
                          EVT::getIntegerVT(*DAG.getContext(),
                                            StoredVT.getSizeInBits()));
    unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);

    // Perform the original store, only redirected to the stack slot.
    SDValue Store = DAG.getTruncStore(Chain, dl,
                                      Val, StackPtr, MachinePointerInfo(),
                                      StoredVT, false, false, 0);
    SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
    SmallVector<SDValue, 8> Stores;
    unsigned Offset = 0;

    // Do all but one of the copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the stack slot.
      SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr,
                                 MachinePointerInfo(),
                                 false, false, 0);
      // Store it to the final location. Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
                                    ST->getPointerInfo().getWithOffset(Offset),
                                    ST->isVolatile(), ST->isNonTemporal(),
                                    MinAlign(ST->getAlignment(), Offset)));
      // Increment the pointers.
      Offset += RegBytes;
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                             Increment);
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
    }

    // The last store may be partial. Do a truncating store. On big-endian
    // machines this requires an extending load from the stack slot to ensure
    // that the bits are in the right place.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (StoredBytes - Offset));

    // Load from the stack slot.
    SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
                                  MachinePointerInfo(),
                                  MemVT, false, false, 0);

    Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
                                       ST->getPointerInfo()
                                         .getWithOffset(Offset),
                                       MemVT, ST->isVolatile(),
                                       ST->isNonTemporal(),
                                       MinAlign(ST->getAlignment(), Offset)));
    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue Result =
      DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
                  Stores.size());
    DAG.ReplaceAllUsesWith(SDValue(ST, 0), Result, DUL);
    return;
  }
  assert(ST->getMemoryVT().isInteger() &&
         !ST->getMemoryVT().isVector() &&
         "Unaligned store of unknown type.");
  // Get the half-size VT.
  EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext());
  int NumBits = NewStoredVT.getSizeInBits();
  int IncrementSize = NumBits / 8;

  // Divide the stored value into two parts.
  SDValue ShiftAmount = DAG.getConstant(NumBits,
                                      TLI.getShiftAmountTy(Val.getValueType()));
  SDValue Lo = Val;
  SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);

  // Store the two parts.
  SDValue Store1, Store2;
  Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian() ? Lo : Hi, Ptr,
                             ST->getPointerInfo(), NewStoredVT,
                             ST->isVolatile(), ST->isNonTemporal(), Alignment);
  Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                    DAG.getConstant(IncrementSize, TLI.getPointerTy()));
  Alignment = MinAlign(Alignment, IncrementSize);
  Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian() ? Hi : Lo, Ptr,
                             ST->getPointerInfo().getWithOffset(IncrementSize),
                             NewStoredVT, ST->isVolatile(), ST->isNonTemporal(),
                             Alignment);

  SDValue Result =
    DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
  DAG.ReplaceAllUsesWith(SDValue(ST, 0), Result, DUL);
}

/// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads.
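/// For example, an unaligned i32 load can be rebuilt from two i16 loads as
/// (Hi << 16) | Lo, where Lo is zero-extended and Hi uses the original load's
/// extension type (this is the integer path at the end of this function).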
static void
ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
                    const TargetLowering &TLI,
                    SDValue &ValResult, SDValue &ChainResult) {
  SDValue Chain = LD->getChain();
  SDValue Ptr = LD->getBasePtr();
  EVT VT = LD->getValueType(0);
  EVT LoadedVT = LD->getMemoryVT();
  DebugLoc dl = LD->getDebugLoc();
  if (VT.isFloatingPoint() || VT.isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
    if (TLI.isTypeLegal(intVT)) {
      // Expand to a (misaligned) integer load of the same size,
      // then bitconvert to floating point or vector.
      SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(),
                                    LD->isVolatile(),
                                    LD->isNonTemporal(), LD->getAlignment());
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
      if (VT.isFloatingPoint() && LoadedVT != VT)
        Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result);

      ValResult = Result;
      ChainResult = Chain;
      return;
    }

    // Copy the value to an (aligned) stack slot using (unaligned) integer
    // loads and stores, then do an (aligned) load from the stack slot.
    EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT);
    unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);

    SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
    SmallVector<SDValue, 8> Stores;
    SDValue StackPtr = StackBase;
    unsigned Offset = 0;

    // Do all but one of the copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the original location.
      SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr,
                                 LD->getPointerInfo().getWithOffset(Offset),
                                 LD->isVolatile(), LD->isNonTemporal(),
                                 MinAlign(LD->getAlignment(), Offset));
      // Follow the load with a store to the stack slot. Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
                                    MachinePointerInfo(), false, false, 0));
      // Increment the pointers.
      Offset += RegBytes;
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                             Increment);
    }

    // The last copy may be partial. Do an extending load.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (LoadedBytes - Offset));
    SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
                                  LD->getPointerInfo().getWithOffset(Offset),
                                  MemVT, LD->isVolatile(),
                                  LD->isNonTemporal(),
                                  MinAlign(LD->getAlignment(), Offset));
    // Follow the load with a store to the stack slot. Remember the store.
    // On big-endian machines this requires a truncating store to ensure
    // that the bits end up in the right place.
    Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
                                       MachinePointerInfo(), MemVT,
                                       false, false, 0));

    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
                             Stores.size());

    // Finally, perform the original load only redirected to the stack slot.
    Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
                          MachinePointerInfo(), LoadedVT, false, false, 0);

    // Callers expect a MERGE_VALUES node.
    ValResult = Load;
    ChainResult = TF;
    return;
  }
  assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
         "Unaligned load of unsupported type.");

  // Compute the new VT that is half the size of the old one. This is an
  // integer MVT.
  unsigned NumBits = LoadedVT.getSizeInBits();
  EVT NewLoadedVT;
  NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
  NumBits >>= 1;

  unsigned Alignment = LD->getAlignment();
  unsigned IncrementSize = NumBits / 8;
  ISD::LoadExtType HiExtType = LD->getExtensionType();

  // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
  if (HiExtType == ISD::NON_EXTLOAD)
    HiExtType = ISD::ZEXTLOAD;

  // Load the value in two parts.
  SDValue Lo, Hi;
  if (TLI.isLittleEndian()) {
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, TLI.getPointerTy()));
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
  } else {
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, TLI.getPointerTy()));
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
  }

  // Aggregate the two parts.
  SDValue ShiftAmount = DAG.getConstant(NumBits,
                                       TLI.getShiftAmountTy(Hi.getValueType()));
  SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
  Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                           Hi.getValue(1));

  ValResult = Result;
  ChainResult = TF;
}

/// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
/// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
/// is necessary to spill the vector being inserted into to memory, perform
/// the insert there, and then read the result back.
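/// The sequence used below is roughly: spill Vec to a stack temporary,
/// compute the address Base + Idx * (element size in bytes), truncstore Val
/// at that address, and reload the whole vector.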
SDValue SelectionDAGLegalize::
PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
                               DebugLoc dl) {
  SDValue Tmp1 = Vec;
  SDValue Tmp2 = Val;
  SDValue Tmp3 = Idx;

  // If the target doesn't support this, we have to spill the input vector
  // to a temporary stack slot, update the element, then reload it. This is
  // badness. We could also load the value into a vector register (either
  // with a "move to register" or "extload into register" instruction, then
  // permute it into place, if the idx is a constant and if the idx is
  // supported by the target.
  EVT VT = Tmp1.getValueType();
  EVT EltVT = VT.getVectorElementType();
  EVT IdxVT = Tmp3.getValueType();
  EVT PtrVT = TLI.getPointerTy();
  SDValue StackPtr = DAG.CreateStackTemporary(VT);

  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();

  // Store the vector.
  SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr,
                            MachinePointerInfo::getFixedStack(SPFI),
                            false, false, 0);

  // Truncate or zero extend offset to target pointer type.
  unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  Tmp3 = DAG.getNode(CastOpc, dl, PtrVT, Tmp3);
  // Add the offset to the index.
  unsigned EltSize = EltVT.getSizeInBits()/8;
  Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,
                     DAG.getConstant(EltSize, IdxVT));
  SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr);
  // Store the scalar value.
  Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, MachinePointerInfo(), EltVT,
                         false, false, 0);
  // Load the updated vector.
  return DAG.getLoad(VT, dl, Ch, StackPtr,
                     MachinePointerInfo::getFixedStack(SPFI), false, false, 0);
}


SDValue SelectionDAGLegalize::
ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, DebugLoc dl) {
  if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) {
    // SCALAR_TO_VECTOR requires that the type of the value being inserted
    // match the element type of the vector being created, except for
    // integers in which case the inserted value can be over width.
    EVT EltVT = Vec.getValueType().getVectorElementType();
    if (Val.getValueType() == EltVT ||
        (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) {
      SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                  Vec.getValueType(), Val);

      unsigned NumElts = Vec.getValueType().getVectorNumElements();
      // We generate a shuffle of InVec and ScVec, so the shuffle mask
      // should be 0,1,2,3,4,5... with the appropriate element replaced with
      // elt 0 of the RHS.
      SmallVector<int, 8> ShufOps;
      for (unsigned i = 0; i != NumElts; ++i)
        ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts);

      return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec,
                                  &ShufOps[0]);
    }
  }
  return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl);
}

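/// OptimizeFloatStore - Replace a store of a ConstantFP with an equivalent
/// integer store when the target has legal integer registers of that width,
/// e.g. 'store float 1.0' -> 'store i32 0x3F800000' (an illustrative sketch;
/// see the FIXMEs in the body for caveats).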
SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
  // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
  // FIXME: We shouldn't do this for TargetConstantFP's.
  // FIXME: move this to the DAG Combiner! Note that we can't regress due
  // to phase ordering between legalized code and the dag combiner. This
  // probably means that we need to integrate dag combiner and legalizer
  // together.
  // We generally can't do this one for long doubles.
  SDValue Tmp1 = ST->getChain();
  SDValue Tmp2 = ST->getBasePtr();
  SDValue Tmp3;
  unsigned Alignment = ST->getAlignment();
  bool isVolatile = ST->isVolatile();
  bool isNonTemporal = ST->isNonTemporal();
  DebugLoc dl = ST->getDebugLoc();
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) {
    if (CFP->getValueType(0) == MVT::f32 &&
        TLI.isTypeLegal(MVT::i32)) {
      Tmp3 = DAG.getConstant(CFP->getValueAPF().
                                 bitcastToAPInt().zextOrTrunc(32),
                             MVT::i32);
      return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                          isVolatile, isNonTemporal, Alignment);
    }

    if (CFP->getValueType(0) == MVT::f64) {
      // If this target supports 64-bit registers, do a single 64-bit store.
      if (TLI.isTypeLegal(MVT::i64)) {
        Tmp3 = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
                                   zextOrTrunc(64), MVT::i64);
        return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                            isVolatile, isNonTemporal, Alignment);
      }

      if (TLI.isTypeLegal(MVT::i32) && !ST->isVolatile()) {
        // Otherwise, if the target supports 32-bit registers, use 2 32-bit
        // stores. If the target supports neither 32- nor 64-bits, this
        // xform is certainly not worth it.
        const APInt &IntVal = CFP->getValueAPF().bitcastToAPInt();
        SDValue Lo = DAG.getConstant(IntVal.trunc(32), MVT::i32);
        SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32);
        if (TLI.isBigEndian()) std::swap(Lo, Hi);

        Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getPointerInfo(), isVolatile,
                          isNonTemporal, Alignment);
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(4));
        Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2,
                          ST->getPointerInfo().getWithOffset(4),
                          isVolatile, isNonTemporal, MinAlign(Alignment, 4U));

        return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
      }
    }
  }
  return SDValue(0, 0);
}

/// LegalizeOp - Return a legal replacement for the given operation, with
/// all legal operands.
void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
  if (Node->getOpcode() == ISD::TargetConstant) // Allow illegal target nodes.
    return;

  DebugLoc dl = Node->getDebugLoc();

  for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
    assert(TLI.getTypeAction(*DAG.getContext(), Node->getValueType(i)) ==
             TargetLowering::TypeLegal &&
           "Unexpected illegal type!");

  for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
    assert((TLI.getTypeAction(*DAG.getContext(),
                              Node->getOperand(i).getValueType()) ==
              TargetLowering::TypeLegal ||
            Node->getOperand(i).getOpcode() == ISD::TargetConstant) &&
           "Unexpected illegal type!");

  SDValue Tmp1, Tmp2, Tmp3, Tmp4;
  bool isCustom = false;

  // Figure out the correct action; the way to query this varies by opcode.
  TargetLowering::LegalizeAction Action = TargetLowering::Legal;
  bool SimpleFinishLegalizing = true;
  switch (Node->getOpcode()) {
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID:
  case ISD::VAARG:
  case ISD::STACKSAVE:
    Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
    break;
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::EXTRACT_VECTOR_ELT:
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(0).getValueType());
    break;
  case ISD::FP_ROUND_INREG:
  case ISD::SIGN_EXTEND_INREG: {
    EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT();
    Action = TLI.getOperationAction(Node->getOpcode(), InnerType);
    break;
  }
  case ISD::ATOMIC_STORE: {
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(2).getValueType());
    break;
  }
  case ISD::SELECT_CC:
  case ISD::SETCC:
  case ISD::BR_CC: {
    unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 :
                         Node->getOpcode() == ISD::SETCC ? 2 : 1;
    unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 : 0;
    EVT OpVT = Node->getOperand(CompareOperand).getValueType();
    ISD::CondCode CCCode =
        cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get();
    Action = TLI.getCondCodeAction(CCCode, OpVT);
    if (Action == TargetLowering::Legal) {
      if (Node->getOpcode() == ISD::SELECT_CC)
        Action = TLI.getOperationAction(Node->getOpcode(),
                                        Node->getValueType(0));
      else
        Action = TLI.getOperationAction(Node->getOpcode(), OpVT);
    }
    break;
  }
  case ISD::LOAD:
  case ISD::STORE:
    // FIXME: Model these properly. LOAD and STORE are complicated, and
    // STORE expects the unlegalized operand in some cases.
    SimpleFinishLegalizing = false;
    break;
  case ISD::CALLSEQ_START:
  case ISD::CALLSEQ_END:
    // FIXME: This shouldn't be necessary. These nodes have special properties
    // dealing with the recursive nature of legalization. Removing this
    // special case should be done as part of making LegalizeDAG non-recursive.
    SimpleFinishLegalizing = false;
    break;
  case ISD::EXTRACT_ELEMENT:
  case ISD::FLT_ROUNDS_:
  case ISD::SADDO:
  case ISD::SSUBO:
  case ISD::UADDO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
  case ISD::FPOWI:
  case ISD::MERGE_VALUES:
  case ISD::EH_RETURN:
  case ISD::FRAME_TO_ARGS_OFFSET:
  case ISD::EH_SJLJ_SETJMP:
  case ISD::EH_SJLJ_LONGJMP:
  case ISD::EH_SJLJ_DISPATCHSETUP:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be expanded.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Expand;
    break;
  case ISD::INIT_TRAMPOLINE:
  case ISD::ADJUST_TRAMPOLINE:
  case ISD::FRAMEADDR:
  case ISD::RETURNADDR:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be custom-lowered.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Custom;
    break;
  default:
    if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
      Action = TargetLowering::Legal;
    } else {
      Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    }
    break;
  }

  if (SimpleFinishLegalizing) {
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
      Ops.push_back(Node->getOperand(i));
    switch (Node->getOpcode()) {
    default: break;
    case ISD::SHL:
    case ISD::SRL:
    case ISD::SRA:
    case ISD::ROTL:
    case ISD::ROTR:
      // Legalizing shifts/rotates requires adjusting the shift amount
      // to the appropriate width.
      if (!Ops[1].getValueType().isVector()) {
        SDValue SAO = DAG.getShiftAmountOperand(Ops[0].getValueType(), Ops[1]);
        HandleSDNode Handle(SAO);
        LegalizeOp(SAO.getNode());
        Ops[1] = Handle.getValue();
      }
      break;
    case ISD::SRL_PARTS:
    case ISD::SRA_PARTS:
    case ISD::SHL_PARTS:
      // Legalizing shifts/rotates requires adjusting the shift amount
      // to the appropriate width.
      if (!Ops[2].getValueType().isVector()) {
        SDValue SAO = DAG.getShiftAmountOperand(Ops[0].getValueType(), Ops[2]);
        HandleSDNode Handle(SAO);
        LegalizeOp(SAO.getNode());
        Ops[2] = Handle.getValue();
      }
      break;
    }

    SDNode *NewNode = DAG.UpdateNodeOperands(Node, Ops.data(), Ops.size());
    if (NewNode != Node) {
      DAG.ReplaceAllUsesWith(Node, NewNode, this);
      for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
        DAG.TransferDbgValues(SDValue(Node, i), SDValue(NewNode, i));
      DAG.RemoveDeadNode(Node, this);
      Node = NewNode;
    }
    switch (Action) {
    case TargetLowering::Legal:
      return;
    case TargetLowering::Custom:
      // FIXME: The handling for custom lowering with multiple results is
      // a complete mess.
      Tmp1 = TLI.LowerOperation(SDValue(Node, 0), DAG);
      if (Tmp1.getNode()) {
        SmallVector<SDValue, 8> ResultVals;
        for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) {
          if (e == 1)
            ResultVals.push_back(Tmp1);
          else
            ResultVals.push_back(Tmp1.getValue(i));
        }
        if (Tmp1.getNode() != Node || Tmp1.getResNo() != 0) {
          DAG.ReplaceAllUsesWith(Node, ResultVals.data(), this);
          for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
            DAG.TransferDbgValues(SDValue(Node, i), ResultVals[i]);
          DAG.RemoveDeadNode(Node, this);
        }
        return;
      }

      // FALL THROUGH
    case TargetLowering::Expand:
      ExpandNode(Node);
      return;
    case TargetLowering::Promote:
      PromoteNode(Node);
      return;
    }
  }

  switch (Node->getOpcode()) {
  default:
#ifndef NDEBUG
    dbgs() << "NODE: ";
    Node->dump(&DAG);
    dbgs() << "\n";
#endif
    assert(0 && "Do not know how to legalize this operator!");

  case ISD::CALLSEQ_START:
  case ISD::CALLSEQ_END:
    break;
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Node);
    Tmp1 = LD->getChain();   // Legalize the chain.
    Tmp2 = LD->getBasePtr(); // Legalize the base pointer.

    ISD::LoadExtType ExtType = LD->getExtensionType();
    if (ExtType == ISD::NON_EXTLOAD) {
      EVT VT = Node->getValueType(0);
      Node = DAG.UpdateNodeOperands(Node, Tmp1, Tmp2, LD->getOffset());
      Tmp3 = SDValue(Node, 0);
      Tmp4 = SDValue(Node, 1);

      switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
      default: assert(0 && "This action is not supported yet!");
      case TargetLowering::Legal:
        // If this is an unaligned load and the target doesn't support it,
        // expand it.
        if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
          Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
          unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
          if (LD->getAlignment() < ABIAlignment) {
            ExpandUnalignedLoad(cast<LoadSDNode>(Node),
                                DAG, TLI, Tmp3, Tmp4);
          }
        }
        break;
      case TargetLowering::Custom:
        Tmp1 = TLI.LowerOperation(Tmp3, DAG);
        if (Tmp1.getNode()) {
          Tmp3 = Tmp1;
          Tmp4 = Tmp1.getValue(1);
        }
        break;
      case TargetLowering::Promote: {
        // Only promote a load of vector type to another.
        assert(VT.isVector() && "Cannot promote this load!");
        // Change base type to a different vector type.
        EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);

        Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getPointerInfo(),
                           LD->isVolatile(), LD->isNonTemporal(),
                           LD->getAlignment());
        Tmp3 = DAG.getNode(ISD::BITCAST, dl, VT, Tmp1);
        Tmp4 = Tmp1.getValue(1);
        break;
      }
      }
      // Since loads produce two values, make sure to remember that we
      // legalized both of them.
      DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp3);
      DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Tmp4);
      return;
    }

    EVT SrcVT = LD->getMemoryVT();
    unsigned SrcWidth = SrcVT.getSizeInBits();
    unsigned Alignment = LD->getAlignment();
    bool isVolatile = LD->isVolatile();
    bool isNonTemporal = LD->isNonTemporal();

    if (SrcWidth != SrcVT.getStoreSizeInBits() &&
        // Some targets pretend to have an i1 loading operation, and actually
        // load an i8. This trick is correct for ZEXTLOAD because the top 7
        // bits are guaranteed to be zero; it helps the optimizers understand
        // that these bits are zero. It is also useful for EXTLOAD, since it
        // tells the optimizers that those bits are undefined. It would be
        // nice to have an effective generic way of getting these benefits...
        // Until such a way is found, don't insist on promoting i1 here.
        (SrcVT != MVT::i1 ||
         TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
      // Promote to a byte-sized load if not loading an integral number of
      // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
      unsigned NewWidth = SrcVT.getStoreSizeInBits();
      EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
      SDValue Ch;

      // The extra bits are guaranteed to be zero, since we stored them that
      // way. A zext load from NVT thus automatically gives zext from SrcVT.

      ISD::LoadExtType NewExtType =
        ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;

      SDValue Result =
        DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
                       Tmp1, Tmp2, LD->getPointerInfo(),
                       NVT, isVolatile, isNonTemporal, Alignment);

      Ch = Result.getValue(1); // The chain.

      if (ExtType == ISD::SEXTLOAD)
        // Having the top bits zero doesn't help when sign extending.
        Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
                             Result.getValueType(),
                             Result, DAG.getValueType(SrcVT));
      else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
        // All the top bits are guaranteed to be zero - inform the optimizers.
        Result = DAG.getNode(ISD::AssertZext, dl,
                             Result.getValueType(), Result,
                             DAG.getValueType(SrcVT));

      Tmp1 = Result;
      Tmp2 = Ch;
    } else if (SrcWidth & (SrcWidth - 1)) {
      // If not loading a power-of-2 number of bits, expand as two loads.
      assert(!SrcVT.isVector() && "Unsupported extload!");
      unsigned RoundWidth = 1 << Log2_32(SrcWidth);
      assert(RoundWidth < SrcWidth);
      unsigned ExtraWidth = SrcWidth - RoundWidth;
      assert(ExtraWidth < RoundWidth);
      assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
             "Load size not an integral number of bytes!");
      EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
      EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
      SDValue Lo, Hi, Ch;
      unsigned IncrementSize;

      if (TLI.isLittleEndian()) {
        // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
        // Load the bottom RoundWidth bits.
        Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0),
                            Tmp1, Tmp2,
                            LD->getPointerInfo(), RoundVT, isVolatile,
                            isNonTemporal, Alignment);

        // Load the remaining ExtraWidth bits.
        IncrementSize = RoundWidth / 8;
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(IncrementSize));
        Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getPointerInfo().getWithOffset(IncrementSize),
                            ExtraVT, isVolatile, isNonTemporal,
                            MinAlign(Alignment, IncrementSize));

        // Build a factor node to remember that this load is independent of
        // the other one.
        Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                         Hi.getValue(1));

        // Move the top bits to the right place.
        Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
                         DAG.getConstant(RoundWidth,
                                      TLI.getShiftAmountTy(Hi.getValueType())));

        // Join the hi and lo parts.
        Tmp1 = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
      } else {
        // Big endian - avoid unaligned loads.
        // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
        // Load the top RoundWidth bits.
        Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getPointerInfo(), RoundVT, isVolatile,
                            isNonTemporal, Alignment);

        // Load the remaining ExtraWidth bits.
        IncrementSize = RoundWidth / 8;
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(IncrementSize));
        Lo = DAG.getExtLoad(ISD::ZEXTLOAD,
                            dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getPointerInfo().getWithOffset(IncrementSize),
                            ExtraVT, isVolatile, isNonTemporal,
                            MinAlign(Alignment, IncrementSize));

        // Build a factor node to remember that this load is independent of
        // the other one.
        Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                         Hi.getValue(1));

        // Move the top bits to the right place.
        Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
                         DAG.getConstant(ExtraWidth,
                                      TLI.getShiftAmountTy(Hi.getValueType())));

        // Join the hi and lo parts.
        Tmp1 = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
      }

      Tmp2 = Ch;
    } else {
      switch (TLI.getLoadExtAction(ExtType, SrcVT)) {
      default: assert(0 && "This action is not supported yet!");
      case TargetLowering::Custom:
        isCustom = true;
        // FALLTHROUGH
      case TargetLowering::Legal:
        Node = DAG.UpdateNodeOperands(Node,
                                      Tmp1, Tmp2, LD->getOffset());
        Tmp1 = SDValue(Node, 0);
        Tmp2 = SDValue(Node, 1);

        if (isCustom) {
          Tmp3 = TLI.LowerOperation(SDValue(Node, 0), DAG);
          if (Tmp3.getNode()) {
            Tmp1 = Tmp3;
            Tmp2 = Tmp3.getValue(1);
          }
        } else {
          // If this is an unaligned load and the target doesn't support it,
          // expand it.
          if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
            Type *Ty =
              LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
            unsigned ABIAlignment =
              TLI.getTargetData()->getABITypeAlignment(Ty);
            if (LD->getAlignment() < ABIAlignment) {
              ExpandUnalignedLoad(cast<LoadSDNode>(Node),
                                  DAG, TLI, Tmp1, Tmp2);
            }
          }
        }
        break;
      case TargetLowering::Expand:
        if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && TLI.isTypeLegal(SrcVT)) {
          SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2,
                                     LD->getPointerInfo(),
                                     LD->isVolatile(), LD->isNonTemporal(),
                                     LD->getAlignment());
          unsigned ExtendOp;
          switch (ExtType) {
          case ISD::EXTLOAD:
            ExtendOp = (SrcVT.isFloatingPoint() ?
                        ISD::FP_EXTEND : ISD::ANY_EXTEND);
            break;
          case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break;
          case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break;
          default: llvm_unreachable("Unexpected extend load type!");
          }
          Tmp1 = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
          Tmp2 = Load.getValue(1);
          break;
        }

        assert(!SrcVT.isVector() &&
               "Vector Loads are handled in LegalizeVectorOps");

        // FIXME: This does not work for vectors on most targets. Sign- and
        // zero-extend operations are currently folded into extending loads,
        // whether they are legal or not, and then we end up here without any
        // support for legalizing them.
        assert(ExtType != ISD::EXTLOAD &&
               "EXTLOAD should always be supported!");
        // Turn the unsupported load into an EXTLOAD followed by an explicit
        // zero/sign extend inreg.
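        // e.g. an unsupported SEXTLOAD:i16 becomes an EXTLOAD:i16 followed by
        // a SIGN_EXTEND_INREG of the loaded value.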
        SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0),
                                        Tmp1, Tmp2, LD->getPointerInfo(), SrcVT,
                                        LD->isVolatile(), LD->isNonTemporal(),
                                        LD->getAlignment());
        SDValue ValRes;
        if (ExtType == ISD::SEXTLOAD)
          ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
                               Result.getValueType(),
                               Result, DAG.getValueType(SrcVT));
        else
          ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType());
        Tmp1 = ValRes;
        Tmp2 = Result.getValue(1);
        break;
      }
    }

    // Since loads produce two values, make sure to remember that we legalized
    // both of them.
    DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp1);
    DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Tmp2);
    break;
  }
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(Node);
    Tmp1 = ST->getChain();
    Tmp2 = ST->getBasePtr();
    unsigned Alignment = ST->getAlignment();
    bool isVolatile = ST->isVolatile();
    bool isNonTemporal = ST->isNonTemporal();

    if (!ST->isTruncatingStore()) {
      if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) {
        DAG.ReplaceAllUsesWith(ST, OptStore, this);
        break;
      }

      {
        Tmp3 = ST->getValue();
        Node = DAG.UpdateNodeOperands(Node,
                                      Tmp1, Tmp3, Tmp2,
                                      ST->getOffset());

        EVT VT = Tmp3.getValueType();
        switch (TLI.getOperationAction(ISD::STORE, VT)) {
        default: assert(0 && "This action is not supported yet!");
        case TargetLowering::Legal:
          // If this is an unaligned store and the target doesn't support it,
          // expand it.
          if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
            Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
            unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
            if (ST->getAlignment() < ABIAlignment)
              ExpandUnalignedStore(cast<StoreSDNode>(Node),
                                   DAG, TLI, this);
          }
          break;
        case TargetLowering::Custom:
          Tmp1 = TLI.LowerOperation(SDValue(Node, 0), DAG);
          if (Tmp1.getNode())
            DAG.ReplaceAllUsesWith(SDValue(Node, 0), Tmp1, this);
          break;
        case TargetLowering::Promote: {
          assert(VT.isVector() && "Unknown legal promote case!");
          Tmp3 = DAG.getNode(ISD::BITCAST, dl,
                             TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3);
          SDValue Result =
            DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
                         ST->getPointerInfo(), isVolatile,
                         isNonTemporal, Alignment);
          DAG.ReplaceAllUsesWith(SDValue(Node, 0), Result, this);
          break;
        }
        }
        break;
      }
    } else {
      Tmp3 = ST->getValue();

      EVT StVT = ST->getMemoryVT();
      unsigned StWidth = StVT.getSizeInBits();

      if (StWidth != StVT.getStoreSizeInBits()) {
        // Promote to a byte-sized store with upper bits zero if not
        // storing an integral number of bytes. For example, promote
        // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
        EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
                                    StVT.getStoreSizeInBits());
        Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT);
        SDValue Result =
          DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                            NVT, isVolatile, isNonTemporal, Alignment);
        DAG.ReplaceAllUsesWith(SDValue(Node, 0), Result, this);
      } else if (StWidth & (StWidth - 1)) {
        // If not storing a power-of-2 number of bits, expand as two stores.
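        // e.g. a TRUNCSTORE:i24 is emitted as a TRUNCSTORE:i16 of the low
        // bits plus a TRUNCSTORE:i8 of the remaining bits, in an
        // endian-dependent order (see the two cases below).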
        assert(!StVT.isVector() && "Unsupported truncstore!");
        unsigned RoundWidth = 1 << Log2_32(StWidth);
        assert(RoundWidth < StWidth);
        unsigned ExtraWidth = StWidth - RoundWidth;
        assert(ExtraWidth < RoundWidth);
        assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
               "Store size not an integral number of bytes!");
        EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
        EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
        SDValue Lo, Hi;
        unsigned IncrementSize;

        if (TLI.isLittleEndian()) {
          // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
          // Store the bottom RoundWidth bits.
          Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                                 RoundVT,
                                 isVolatile, isNonTemporal, Alignment);

          // Store the remaining ExtraWidth bits.
          IncrementSize = RoundWidth / 8;
          Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                             DAG.getIntPtrConstant(IncrementSize));
          Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3,
                           DAG.getConstant(RoundWidth,
                                    TLI.getShiftAmountTy(Tmp3.getValueType())));
          Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2,
                             ST->getPointerInfo().getWithOffset(IncrementSize),
                                 ExtraVT, isVolatile, isNonTemporal,
                                 MinAlign(Alignment, IncrementSize));
        } else {
          // Big endian - avoid unaligned stores.
          // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
          // Store the top RoundWidth bits.
          Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3,
                           DAG.getConstant(ExtraWidth,
                                    TLI.getShiftAmountTy(Tmp3.getValueType())));
          Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getPointerInfo(),
                                 RoundVT, isVolatile, isNonTemporal, Alignment);

          // Store the remaining ExtraWidth bits.
          IncrementSize = RoundWidth / 8;
          Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                             DAG.getIntPtrConstant(IncrementSize));
          Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2,
                             ST->getPointerInfo().getWithOffset(IncrementSize),
                                 ExtraVT, isVolatile, isNonTemporal,
                                 MinAlign(Alignment, IncrementSize));
        }

        // The order of the stores doesn't matter.
        SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
        DAG.ReplaceAllUsesWith(SDValue(Node, 0), Result, this);
      } else {
        if (Tmp1 != ST->getChain() || Tmp3 != ST->getValue() ||
            Tmp2 != ST->getBasePtr())
          Node = DAG.UpdateNodeOperands(Node, Tmp1, Tmp3, Tmp2,
                                        ST->getOffset());

        switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) {
        default: assert(0 && "This action is not supported yet!");
        case TargetLowering::Legal:
          // If this is an unaligned store and the target doesn't support it,
          // expand it.
          if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
            Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
            unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
            if (ST->getAlignment() < ABIAlignment)
              ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
          }
          break;
        case TargetLowering::Custom:
          DAG.ReplaceAllUsesWith(SDValue(Node, 0),
                                 TLI.LowerOperation(SDValue(Node, 0), DAG),
                                 this);
          break;
        case TargetLowering::Expand:
          assert(!StVT.isVector() &&
                 "Vector Stores are handled in LegalizeVectorOps");

          // TRUNCSTORE:i16 i32 -> STORE i16
          assert(TLI.isTypeLegal(StVT) &&
                 "Do not know how to expand this store!");
          Tmp3 = DAG.getNode(ISD::TRUNCATE, dl, StVT, Tmp3);
          SDValue Result =
            DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                         isVolatile, isNonTemporal, Alignment);
          DAG.ReplaceAllUsesWith(SDValue(Node, 0), Result, this);
          break;
        }
      }
    }
    break;
  }
  }
}

SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
  SDValue Vec = Op.getOperand(0);
  SDValue Idx = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  // Store the value to a temporary stack slot, then LOAD the returned part.
  SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
  SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr,
                            MachinePointerInfo(), false, false, 0);

  // Add the offset to the index.
  unsigned EltSize =
      Vec.getValueType().getVectorElementType().getSizeInBits()/8;
  Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
                    DAG.getConstant(EltSize, Idx.getValueType()));

  if (Idx.getValueType().bitsGT(TLI.getPointerTy()))
    Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx);
  else
    Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx);

  StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr);

  if (Op.getValueType().isVector())
    return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr,
                       MachinePointerInfo(), false, false, 0);
  return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr,
                        MachinePointerInfo(),
                        Vec.getValueType().getVectorElementType(),
                        false, false, 0);
}

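/// ExpandInsertToVectorThroughStack - Lower an insert-subvector through the
/// stack: spill the whole vector, store the inserted part at
/// Base + Idx * (element size in bytes), then reload the updated vector.
/// (A summary of the code below; it mirrors the extract path above.)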
SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) {
  assert(Op.getValueType().isVector() && "Non-vector insert subvector!");

  SDValue Vec  = Op.getOperand(0);
  SDValue Part = Op.getOperand(1);
  SDValue Idx  = Op.getOperand(2);
  DebugLoc dl  = Op.getDebugLoc();

  // Store the value to a temporary stack slot, then LOAD the returned part.

  SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
  int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI);

  // First store the whole vector.
  SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo,
                            false, false, 0);

  // Then store the inserted part.

  // Add the offset to the index.
  unsigned EltSize =
      Vec.getValueType().getVectorElementType().getSizeInBits()/8;

  Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
                    DAG.getConstant(EltSize, Idx.getValueType()));

  if (Idx.getValueType().bitsGT(TLI.getPointerTy()))
    Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx);
  else
    Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx);

  SDValue SubStackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx,
                                    StackPtr);

  // Store the subvector.
  Ch = DAG.getStore(DAG.getEntryNode(), dl, Part, SubStackPtr,
                    MachinePointerInfo(), false, false, 0);

  // Finally, load the updated vector.
  return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo,
                     false, false, 0);
}

SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
  // We can't handle this case efficiently. Allocate a sufficiently
  // aligned object on the stack, store each element into it, then load
  // the result as a vector.
  // Create the stack frame object.
  EVT VT = Node->getValueType(0);
  EVT EltVT = VT.getVectorElementType();
  DebugLoc dl = Node->getDebugLoc();
  SDValue FIPtr = DAG.CreateStackTemporary(VT);
  int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI);

  // Emit a store of each element to the stack slot.
  SmallVector<SDValue, 8> Stores;
  unsigned TypeByteSize = EltVT.getSizeInBits() / 8;
  // Store (in the right endianness) the elements to memory.
  for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
    // Ignore undef elements.
    if (Node->getOperand(i).getOpcode() == ISD::UNDEF) continue;

    unsigned Offset = TypeByteSize*i;

    SDValue Idx = DAG.getConstant(Offset, FIPtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, Idx);

    // If the destination vector element type is narrower than the source
    // element type, only store the bits necessary.
    if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) {
      Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl,
                                         Node->getOperand(i), Idx,
                                         PtrInfo.getWithOffset(Offset),
                                         EltVT, false, false, 0));
    } else
      Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl,
                                    Node->getOperand(i), Idx,
                                    PtrInfo.getWithOffset(Offset),
                                    false, false, 0));
  }

  SDValue StoreChain;
  if (!Stores.empty())    // Not all undef elements?
    StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             &Stores[0], Stores.size());
  else
    StoreChain = DAG.getEntryNode();

  // Result is a load from the stack slot.
  return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo, false, false, 0);
}

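/// ExpandFCOPYSIGN - Expand FCOPYSIGN when there is no native instruction:
/// extract the sign bit of the second operand (via a bitcast, or a stack
/// round-trip when no same-sized integer type is legal), take FABS of the
/// first operand, and SELECT between the negated and plain absolute value.
/// (A summary of the code below.)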
SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
  DebugLoc dl = Node->getDebugLoc();
  SDValue Tmp1 = Node->getOperand(0);
  SDValue Tmp2 = Node->getOperand(1);

  // Get the sign bit of the RHS. First obtain a value that has the same
  // sign as the sign bit, i.e. negative if and only if the sign bit is 1.
  SDValue SignBit;
  EVT FloatVT = Tmp2.getValueType();
  EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits());
  if (TLI.isTypeLegal(IVT)) {
    // Convert to an integer with the same sign bit.
    SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2);
  } else {
    // Store the float to memory, then load the sign part out as an integer.
    MVT LoadTy = TLI.getPointerTy();
    // First create a temporary that is aligned for both the load and store.
    SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy);
    // Then store the float to it.
    SDValue Ch =
      DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr,
                   MachinePointerInfo(), false, false, 0);
    if (TLI.isBigEndian()) {
      assert(FloatVT.isByteSized() && "Unsupported floating point type!");
      // Load out a legal integer with the same sign bit as the float.
      SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(),
                            false, false, 0);
    } else { // Little endian
      SDValue LoadPtr = StackPtr;
      // The float may be wider than the integer we are going to load. Advance
      // the pointer so that the loaded integer will contain the sign bit.
      unsigned Strides = (FloatVT.getSizeInBits()-1)/LoadTy.getSizeInBits();
      unsigned ByteOffset = (Strides * LoadTy.getSizeInBits()) / 8;
      LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(),
                            LoadPtr, DAG.getIntPtrConstant(ByteOffset));
      // Load a legal integer containing the sign bit.
      SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, MachinePointerInfo(),
                            false, false, 0);
      // Move the sign bit to the top bit of the loaded integer.
      unsigned BitShift = LoadTy.getSizeInBits() -
        (FloatVT.getSizeInBits() - 8 * ByteOffset);
      assert(BitShift < LoadTy.getSizeInBits() && "Pointer advanced wrong?");
      if (BitShift)
        SignBit = DAG.getNode(ISD::SHL, dl, LoadTy, SignBit,
                              DAG.getConstant(BitShift,
                                 TLI.getShiftAmountTy(SignBit.getValueType())));
    }
  }
  // Now get the sign bit proper, by seeing whether the value is negative.
  SignBit = DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()),
                         SignBit, DAG.getConstant(0, SignBit.getValueType()),
                         ISD::SETLT);
  // Get the absolute value of the result.
  SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1);
  // Select between the nabs and abs value based on the sign bit of
  // the input.
  return DAG.getNode(ISD::SELECT, dl, AbsVal.getValueType(), SignBit,
                     DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal),
                     AbsVal);
}

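/// A sketch of the expansion below: bracket the allocation in
/// CALLSEQ_START/CALLSEQ_END, read the stack pointer, round it down to the
/// requested alignment when that exceeds the target's stack alignment,
/// subtract the size, and copy the result back to the stack pointer register;
/// the new stack pointer value is also the address of the allocated memory.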
1504 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true)); 1505 1506 SDValue Size = Tmp2.getOperand(1); 1507 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); 1508 Chain = SP.getValue(1); 1509 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue(); 1510 unsigned StackAlign = TM.getFrameLowering()->getStackAlignment(); 1511 if (Align > StackAlign) 1512 SP = DAG.getNode(ISD::AND, dl, VT, SP, 1513 DAG.getConstant(-(uint64_t)Align, VT)); 1514 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value 1515 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain 1516 1517 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true), 1518 DAG.getIntPtrConstant(0, true), SDValue()); 1519 1520 Results.push_back(Tmp1); 1521 Results.push_back(Tmp2); 1522} 1523 1524/// LegalizeSetCCCondCode - Legalize a SETCC with given LHS and RHS and 1525/// condition code CC on the current target. This routine expands SETCC with 1526/// illegal condition code into AND / OR of multiple SETCC values. 1527void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, 1528 SDValue &LHS, SDValue &RHS, 1529 SDValue &CC, 1530 DebugLoc dl) { 1531 EVT OpVT = LHS.getValueType(); 1532 ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get(); 1533 switch (TLI.getCondCodeAction(CCCode, OpVT)) { 1534 default: assert(0 && "Unknown condition code action!"); 1535 case TargetLowering::Legal: 1536 // Nothing to do. 1537 break; 1538 case TargetLowering::Expand: { 1539 ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID; 1540 unsigned Opc = 0; 1541 switch (CCCode) { 1542 default: assert(0 && "Don't know how to expand this condition!"); 1543 case ISD::SETOEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETO; Opc = ISD::AND; break; 1544 case ISD::SETOGT: CC1 = ISD::SETGT; CC2 = ISD::SETO; Opc = ISD::AND; break; 1545 case ISD::SETOGE: CC1 = ISD::SETGE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1546 case ISD::SETOLT: CC1 = ISD::SETLT; CC2 = ISD::SETO; Opc = ISD::AND; break; 1547 case ISD::SETOLE: CC1 = ISD::SETLE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1548 case ISD::SETONE: CC1 = ISD::SETNE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1549 case ISD::SETUEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1550 case ISD::SETUGT: CC1 = ISD::SETGT; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1551 case ISD::SETUGE: CC1 = ISD::SETGE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1552 case ISD::SETULT: CC1 = ISD::SETLT; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1553 case ISD::SETULE: CC1 = ISD::SETLE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1554 case ISD::SETUNE: CC1 = ISD::SETNE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1555 // FIXME: Implement more expansions. 1556 } 1557 1558 SDValue SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1); 1559 SDValue SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2); 1560 LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2); 1561 RHS = SDValue(); 1562 CC = SDValue(); 1563 break; 1564 } 1565 } 1566} 1567 1568/// EmitStackConvert - Emit a store/load combination to the stack. This stores 1569/// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does 1570/// a load from the stack slot to DestVT, extending it if needed. 1571/// The resultant code need not be legal. 1572SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, 1573 EVT SlotVT, 1574 EVT DestVT, 1575 DebugLoc dl) { 1576 // Create the stack frame object. 1577 unsigned SrcAlign = 1578 TLI.getTargetData()->getPrefTypeAlignment(SrcOp.getValueType(). 
1579 getTypeForEVT(*DAG.getContext())); 1580 SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign); 1581 1582 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr); 1583 int SPFI = StackPtrFI->getIndex(); 1584 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI); 1585 1586 unsigned SrcSize = SrcOp.getValueType().getSizeInBits(); 1587 unsigned SlotSize = SlotVT.getSizeInBits(); 1588 unsigned DestSize = DestVT.getSizeInBits(); 1589 Type *DestType = DestVT.getTypeForEVT(*DAG.getContext()); 1590 unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(DestType); 1591 1592 // Emit a store to the stack slot. Use a truncstore if the input value is 1593 // wider than SlotVT. 1594 SDValue Store; 1595 1596 if (SrcSize > SlotSize) 1597 Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, 1598 PtrInfo, SlotVT, false, false, SrcAlign); 1599 else { 1600 assert(SrcSize == SlotSize && "Invalid store"); 1601 Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, 1602 PtrInfo, false, false, SrcAlign); 1603 } 1604 1605 // Result is a load from the stack slot. 1606 if (SlotSize == DestSize) 1607 return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo, 1608 false, false, DestAlign); 1609 1610 assert(SlotSize < DestSize && "Unknown extension!"); 1611 return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, 1612 PtrInfo, SlotVT, false, false, DestAlign); 1613} 1614 1615SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) { 1616 DebugLoc dl = Node->getDebugLoc(); 1617 // Create a vector sized/aligned stack slot, store the value to element #0, 1618 // then load the whole vector back out. 1619 SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0)); 1620 1621 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr); 1622 int SPFI = StackPtrFI->getIndex(); 1623 1624 SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0), 1625 StackPtr, 1626 MachinePointerInfo::getFixedStack(SPFI), 1627 Node->getValueType(0).getVectorElementType(), 1628 false, false, 0); 1629 return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr, 1630 MachinePointerInfo::getFixedStack(SPFI), 1631 false, false, 0); 1632} 1633 1634 1635/// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't 1636/// support the operation, but do support the resultant vector type. 1637SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { 1638 unsigned NumElems = Node->getNumOperands(); 1639 SDValue Value1, Value2; 1640 DebugLoc dl = Node->getDebugLoc(); 1641 EVT VT = Node->getValueType(0); 1642 EVT OpVT = Node->getOperand(0).getValueType(); 1643 EVT EltVT = VT.getVectorElementType(); 1644 1645 // If the only non-undef value is the low element, turn this into a 1646 // SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X.
1647 bool isOnlyLowElement = true; 1648 bool MoreThanTwoValues = false; 1649 bool isConstant = true; 1650 for (unsigned i = 0; i < NumElems; ++i) { 1651 SDValue V = Node->getOperand(i); 1652 if (V.getOpcode() == ISD::UNDEF) 1653 continue; 1654 if (i > 0) 1655 isOnlyLowElement = false; 1656 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 1657 isConstant = false; 1658 1659 if (!Value1.getNode()) { 1660 Value1 = V; 1661 } else if (!Value2.getNode()) { 1662 if (V != Value1) 1663 Value2 = V; 1664 } else if (V != Value1 && V != Value2) { 1665 MoreThanTwoValues = true; 1666 } 1667 } 1668 1669 if (!Value1.getNode()) 1670 return DAG.getUNDEF(VT); 1671 1672 if (isOnlyLowElement) 1673 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0)); 1674 1675 // If all elements are constants, create a load from the constant pool. 1676 if (isConstant) { 1677 std::vector<Constant*> CV; 1678 for (unsigned i = 0, e = NumElems; i != e; ++i) { 1679 if (ConstantFPSDNode *V = 1680 dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) { 1681 CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue())); 1682 } else if (ConstantSDNode *V = 1683 dyn_cast<ConstantSDNode>(Node->getOperand(i))) { 1684 if (OpVT==EltVT) 1685 CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue())); 1686 else { 1687 // If OpVT and EltVT don't match, EltVT is not legal and the 1688 // element values have been promoted/truncated earlier. Undo this; 1689 // we don't want a v16i8 to become a v16i32 for example. 1690 const ConstantInt *CI = V->getConstantIntValue(); 1691 CV.push_back(ConstantInt::get(EltVT.getTypeForEVT(*DAG.getContext()), 1692 CI->getZExtValue())); 1693 } 1694 } else { 1695 assert(Node->getOperand(i).getOpcode() == ISD::UNDEF); 1696 Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext()); 1697 CV.push_back(UndefValue::get(OpNTy)); 1698 } 1699 } 1700 Constant *CP = ConstantVector::get(CV); 1701 SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy()); 1702 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 1703 return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 1704 MachinePointerInfo::getConstantPool(), 1705 false, false, Alignment); 1706 } 1707 1708 if (!MoreThanTwoValues) { 1709 SmallVector<int, 8> ShuffleVec(NumElems, -1); 1710 for (unsigned i = 0; i < NumElems; ++i) { 1711 SDValue V = Node->getOperand(i); 1712 if (V.getOpcode() == ISD::UNDEF) 1713 continue; 1714 ShuffleVec[i] = V == Value1 ? 0 : NumElems; 1715 } 1716 if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) { 1717 // Get the splatted value into the low element of a vector register. 1718 SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1); 1719 SDValue Vec2; 1720 if (Value2.getNode()) 1721 Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2); 1722 else 1723 Vec2 = DAG.getUNDEF(VT); 1724 1725 // Return shuffle(LowValVec, undef, <0,0,0,0>) 1726 return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data()); 1727 } 1728 } 1729 1730 // Otherwise, we can't handle this case efficiently. 1731 return ExpandVectorBuildThroughStack(Node); 1732} 1733 1734// ExpandLibCall - Expand a node into a call to a libcall. If the result value 1735// does not fit into a register, return the lo part and set the hi part to the 1736// by-reg argument. If it does fit into a single register, return the result 1737// and leave the Hi part unset. 
1738SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, 1739 bool isSigned) { 1740 // The input chain to this libcall is the entry node of the function. 1741 // Legalizing the call will automatically add the previous call to the 1742 // dependence. 1743 SDValue InChain = DAG.getEntryNode(); 1744 1745 TargetLowering::ArgListTy Args; 1746 TargetLowering::ArgListEntry Entry; 1747 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1748 EVT ArgVT = Node->getOperand(i).getValueType(); 1749 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 1750 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 1751 Entry.isSExt = isSigned; 1752 Entry.isZExt = !isSigned; 1753 Args.push_back(Entry); 1754 } 1755 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1756 TLI.getPointerTy()); 1757 1758 Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 1759 1760 // isTailCall may be true since the callee does not reference caller stack 1761 // frame. Check if it's in the right position. 1762 bool isTailCall = isInTailCallPosition(DAG, Node, TLI); 1763 std::pair<SDValue, SDValue> CallInfo = 1764 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 1765 0, TLI.getLibcallCallingConv(LC), isTailCall, 1766 /*isReturnValueUsed=*/true, 1767 Callee, Args, DAG, Node->getDebugLoc()); 1768 1769 if (!CallInfo.second.getNode()) 1770 // It's a tailcall, return the chain (which is the DAG root). 1771 return DAG.getRoot(); 1772 1773 return CallInfo.first; 1774} 1775 1776/// ExpandLibCall - Generate a libcall taking the given operands as arguments 1777/// and returning a result of type RetVT. 1778SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, 1779 const SDValue *Ops, unsigned NumOps, 1780 bool isSigned, DebugLoc dl) { 1781 TargetLowering::ArgListTy Args; 1782 Args.reserve(NumOps); 1783 1784 TargetLowering::ArgListEntry Entry; 1785 for (unsigned i = 0; i != NumOps; ++i) { 1786 Entry.Node = Ops[i]; 1787 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext()); 1788 Entry.isSExt = isSigned; 1789 Entry.isZExt = !isSigned; 1790 Args.push_back(Entry); 1791 } 1792 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1793 TLI.getPointerTy()); 1794 1795 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 1796 std::pair<SDValue,SDValue> CallInfo = 1797 TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false, 1798 false, 0, TLI.getLibcallCallingConv(LC), false, 1799 /*isReturnValueUsed=*/true, 1800 Callee, Args, DAG, dl); 1801 1802 return CallInfo.first; 1803} 1804 1805// ExpandChainLibCall - Expand a node into a call to a libcall. Similar to 1806// ExpandLibCall except that the first operand is the in-chain. 
1807std::pair<SDValue, SDValue> 1808SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC, 1809 SDNode *Node, 1810 bool isSigned) { 1811 SDValue InChain = Node->getOperand(0); 1812 1813 TargetLowering::ArgListTy Args; 1814 TargetLowering::ArgListEntry Entry; 1815 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) { 1816 EVT ArgVT = Node->getOperand(i).getValueType(); 1817 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 1818 Entry.Node = Node->getOperand(i); 1819 Entry.Ty = ArgTy; 1820 Entry.isSExt = isSigned; 1821 Entry.isZExt = !isSigned; 1822 Args.push_back(Entry); 1823 } 1824 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1825 TLI.getPointerTy()); 1826 1827 Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 1828 std::pair<SDValue, SDValue> CallInfo = 1829 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 1830 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false, 1831 /*isReturnValueUsed=*/true, 1832 Callee, Args, DAG, Node->getDebugLoc()); 1833 1834 return CallInfo; 1835} 1836 1837SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node, 1838 RTLIB::Libcall Call_F32, 1839 RTLIB::Libcall Call_F64, 1840 RTLIB::Libcall Call_F80, 1841 RTLIB::Libcall Call_PPCF128) { 1842 RTLIB::Libcall LC; 1843 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1844 default: assert(0 && "Unexpected request for libcall!"); 1845 case MVT::f32: LC = Call_F32; break; 1846 case MVT::f64: LC = Call_F64; break; 1847 case MVT::f80: LC = Call_F80; break; 1848 case MVT::ppcf128: LC = Call_PPCF128; break; 1849 } 1850 return ExpandLibCall(LC, Node, false); 1851} 1852 1853SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned, 1854 RTLIB::Libcall Call_I8, 1855 RTLIB::Libcall Call_I16, 1856 RTLIB::Libcall Call_I32, 1857 RTLIB::Libcall Call_I64, 1858 RTLIB::Libcall Call_I128) { 1859 RTLIB::Libcall LC; 1860 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1861 default: assert(0 && "Unexpected request for libcall!"); 1862 case MVT::i8: LC = Call_I8; break; 1863 case MVT::i16: LC = Call_I16; break; 1864 case MVT::i32: LC = Call_I32; break; 1865 case MVT::i64: LC = Call_I64; break; 1866 case MVT::i128: LC = Call_I128; break; 1867 } 1868 return ExpandLibCall(LC, Node, isSigned); 1869} 1870 1871/// isDivRemLibcallAvailable - Return true if divmod libcall is available. 1872static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned, 1873 const TargetLowering &TLI) { 1874 RTLIB::Libcall LC; 1875 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1876 default: assert(0 && "Unexpected request for libcall!"); 1877 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 1878 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 1879 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 1880 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 1881 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 1882 } 1883 1884 return TLI.getLibcallName(LC) != 0; 1885} 1886 1887/// UseDivRem - Only issue divrem libcall if both quotient and remainder are 1888/// needed. 1889static bool UseDivRem(SDNode *Node, bool isSigned, bool isDIV) { 1890 unsigned OtherOpcode = 0; 1891 if (isSigned) 1892 OtherOpcode = isDIV ? ISD::SREM : ISD::SDIV; 1893 else 1894 OtherOpcode = isDIV ? 
ISD::UREM : ISD::UDIV; 1895 1896 SDValue Op0 = Node->getOperand(0); 1897 SDValue Op1 = Node->getOperand(1); 1898 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(), 1899 UE = Op0.getNode()->use_end(); UI != UE; ++UI) { 1900 SDNode *User = *UI; 1901 if (User == Node) 1902 continue; 1903 if (User->getOpcode() == OtherOpcode && 1904 User->getOperand(0) == Op0 && 1905 User->getOperand(1) == Op1) 1906 return true; 1907 } 1908 return false; 1909} 1910 1911/// ExpandDivRemLibCall - Issue libcalls to __{u}divmod to compute div / rem 1912/// pairs. 1913void 1914SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node, 1915 SmallVectorImpl<SDValue> &Results) { 1916 unsigned Opcode = Node->getOpcode(); 1917 bool isSigned = Opcode == ISD::SDIVREM; 1918 1919 RTLIB::Libcall LC; 1920 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1921 default: assert(0 && "Unexpected request for libcall!"); 1922 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 1923 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 1924 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 1925 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 1926 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 1927 } 1928 1929 // The input chain to this libcall is the entry node of the function. 1930 // Legalizing the call will automatically add the previous call to the 1931 // dependence. 1932 SDValue InChain = DAG.getEntryNode(); 1933 1934 EVT RetVT = Node->getValueType(0); 1935 Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 1936 1937 TargetLowering::ArgListTy Args; 1938 TargetLowering::ArgListEntry Entry; 1939 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1940 EVT ArgVT = Node->getOperand(i).getValueType(); 1941 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 1942 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 1943 Entry.isSExt = isSigned; 1944 Entry.isZExt = !isSigned; 1945 Args.push_back(Entry); 1946 } 1947 1948 // Also pass the return address of the remainder. 1949 SDValue FIPtr = DAG.CreateStackTemporary(RetVT); 1950 Entry.Node = FIPtr; 1951 Entry.Ty = RetTy->getPointerTo(); 1952 Entry.isSExt = isSigned; 1953 Entry.isZExt = !isSigned; 1954 Args.push_back(Entry); 1955 1956 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1957 TLI.getPointerTy()); 1958 1959 DebugLoc dl = Node->getDebugLoc(); 1960 std::pair<SDValue, SDValue> CallInfo = 1961 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 1962 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false, 1963 /*isReturnValueUsed=*/true, Callee, Args, DAG, dl); 1964 1965 // Remainder is loaded back from the stack frame. 1966 SDValue Rem = DAG.getLoad(RetVT, dl, CallInfo.second, FIPtr, 1967 MachinePointerInfo(), false, false, 0); 1968 Results.push_back(CallInfo.first); 1969 Results.push_back(Rem); 1970} 1971 1972/// ExpandLegalINT_TO_FP - This function is responsible for legalizing a 1973/// INT_TO_FP operation of the specified operand when the target requests that 1974/// we expand it. At this point, we know that the result and operand types are 1975/// legal for the target. 1976SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, 1977 SDValue Op0, 1978 EVT DestVT, 1979 DebugLoc dl) { 1980 if (Op0.getValueType() == MVT::i32) { 1981 // simple 32-bit [signed|unsigned] integer to float/double expansion 1982 1983 // Get the stack frame index of a 8 byte buffer. 
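// EDITOR SKETCH (not part of the original file): what the two stores and the
// f64 load below compute, in plain C. Assumes IEEE-754 doubles and
// <stdint.h>/<string.h>; building the 64-bit pattern directly sidesteps the
// Hi/Lo endian handling that the DAG code has to do explicitly.
#if 0
static double i32_to_f64_sketch(uint32_t bits, int is_signed) {
  // Hi word 0x43300000 sets the exponent to 2^52; the integer lands in the
  // low 32 mantissa bits, so the double is exactly 2^52 + mapped value.
  uint64_t pattern = (UINT64_C(0x43300000) << 32) |
                     (is_signed ? (bits ^ 0x80000000u) : bits);
  double d;
  memcpy(&d, &pattern, sizeof d);
  // Subtract the bias: 2^52, plus 2^31 to undo the signed->unsigned mapping.
  return d - (4503599627370496.0 + (is_signed ? 2147483648.0 : 0.0));
}
#endif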
1984 SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64); 1985 1986 // word offset constant for Hi/Lo address computation 1987 SDValue WordOff = DAG.getConstant(sizeof(int), TLI.getPointerTy()); 1988 // set up Hi and Lo (into buffer) address based on endian 1989 SDValue Hi = StackSlot; 1990 SDValue Lo = DAG.getNode(ISD::ADD, dl, 1991 TLI.getPointerTy(), StackSlot, WordOff); 1992 if (TLI.isLittleEndian()) 1993 std::swap(Hi, Lo); 1994 1995 // if signed map to unsigned space 1996 SDValue Op0Mapped; 1997 if (isSigned) { 1998 // constant used to invert sign bit (signed to unsigned mapping) 1999 SDValue SignBit = DAG.getConstant(0x80000000u, MVT::i32); 2000 Op0Mapped = DAG.getNode(ISD::XOR, dl, MVT::i32, Op0, SignBit); 2001 } else { 2002 Op0Mapped = Op0; 2003 } 2004 // store the lo of the constructed double - based on integer input 2005 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, 2006 Op0Mapped, Lo, MachinePointerInfo(), 2007 false, false, 0); 2008 // initial hi portion of constructed double 2009 SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32); 2010 // store the hi of the constructed double - biased exponent 2011 SDValue Store2 = DAG.getStore(Store1, dl, InitialHi, Hi, 2012 MachinePointerInfo(), 2013 false, false, 0); 2014 // load the constructed double 2015 SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot, 2016 MachinePointerInfo(), false, false, 0); 2017 // FP constant to bias correct the final result 2018 SDValue Bias = DAG.getConstantFP(isSigned ? 2019 BitsToDouble(0x4330000080000000ULL) : 2020 BitsToDouble(0x4330000000000000ULL), 2021 MVT::f64); 2022 // subtract the bias 2023 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias); 2024 // final result 2025 SDValue Result; 2026 // handle final rounding 2027 if (DestVT == MVT::f64) { 2028 // do nothing 2029 Result = Sub; 2030 } else if (DestVT.bitsLT(MVT::f64)) { 2031 Result = DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 2032 DAG.getIntPtrConstant(0)); 2033 } else if (DestVT.bitsGT(MVT::f64)) { 2034 Result = DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 2035 } 2036 return Result; 2037 } 2038 assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet"); 2039 // Code below here assumes !isSigned without checking again. 2040 2041 // Implementation of unsigned i64 to f64 following the algorithm in 2042 // __floatundidf in compiler_rt. This implementation has the advantage 2043 // of performing rounding correctly, both in the default rounding mode 2044 // and in all alternate rounding modes. 2045 // TODO: Generalize this for use with other types. 
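// EDITOR SKETCH (not part of the original file): the same split written as
// scalar C, mirroring the constants used below (cf. compiler-rt's
// __floatundidf); assumes IEEE-754 doubles and <stdint.h>/<string.h>.
#if 0
static double u64_to_f64_sketch(uint64_t x) {
  uint64_t lo = (x & 0xffffffffULL) | UINT64_C(0x4330000000000000); // 2^52 | low
  uint64_t hi = (x >> 32)           | UINT64_C(0x4530000000000000); // 2^84 | high
  uint64_t bb = UINT64_C(0x4530000000100000);                       // 2^84 + 2^52
  double dlo, dhi, bias;
  memcpy(&dlo, &lo, sizeof dlo);    // == 2^52 + (x & 0xffffffff), exactly
  memcpy(&dhi, &hi, sizeof dhi);    // == 2^84 + 2^32 * (x >> 32), exactly
  memcpy(&bias, &bb, sizeof bias);
  // The subtraction is exact; the final addition is the only rounding step,
  // which is why the result is correct in every rounding mode.
  return dlo + (dhi - bias);
}
#endif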
2046 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f64) { 2047 SDValue TwoP52 = 2048 DAG.getConstant(UINT64_C(0x4330000000000000), MVT::i64); 2049 SDValue TwoP84PlusTwoP52 = 2050 DAG.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), MVT::f64); 2051 SDValue TwoP84 = 2052 DAG.getConstant(UINT64_C(0x4530000000000000), MVT::i64); 2053 2054 SDValue Lo = DAG.getZeroExtendInReg(Op0, dl, MVT::i32); 2055 SDValue Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, 2056 DAG.getConstant(32, MVT::i64)); 2057 SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52); 2058 SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84); 2059 SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr); 2060 SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr); 2061 SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt, 2062 TwoP84PlusTwoP52); 2063 return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub); 2064 } 2065 2066 // Implementation of unsigned i64 to f32. 2067 // TODO: Generalize this for use with other types. 2068 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) { 2069 // For unsigned conversions, convert them to signed conversions using the 2070 // algorithm from the x86_64 __floatundidf in compiler_rt. 2071 if (!isSigned) { 2072 SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0); 2073 2074 SDValue ShiftConst = 2075 DAG.getConstant(1, TLI.getShiftAmountTy(Op0.getValueType())); 2076 SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst); 2077 SDValue AndConst = DAG.getConstant(1, MVT::i64); 2078 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst); 2079 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr); 2080 2081 SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or); 2082 SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt); 2083 2084 // TODO: This really should be implemented using a branch rather than a 2085 // select. We happen to get lucky and machinesink does the right 2086 // thing most of the time. This would be a good candidate for a 2087 //pseudo-op, or, even better, for whole-function isel. 2088 SDValue SignBitTest = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2089 Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT); 2090 return DAG.getNode(ISD::SELECT, dl, MVT::f32, SignBitTest, Slow, Fast); 2091 } 2092 2093 // Otherwise, implement the fully general conversion. 
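// EDITOR SKETCH (not part of the original file): the select/shift sequence
// below in scalar form. For x >= 2^53 the low 11 bits are folded into bit 11
// as a sticky bit, so rounding to double first cannot disturb the final
// rounding to float. Assumes <stdint.h>.
#if 0
static float u64_to_f32_general_sketch(uint64_t x) {
  uint64_t adj = x;
  if (x >= (UINT64_C(1) << 53) && (x & 0x7ff))   // the SETUGE / SETNE selects
    adj = (x & ~UINT64_C(0x7ff)) | 0x800;
  double hi = (double)(uint32_t)(adj >> 32);     // UINT_TO_FP of each half
  double lo = (double)(uint32_t)adj;
  return (float)(hi * 4294967296.0 + lo);        // 2^32 * hi + lo, then round
}
#endif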
2094 2095 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2096 DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64)); 2097 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, 2098 DAG.getConstant(UINT64_C(0x800), MVT::i64)); 2099 SDValue And2 = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2100 DAG.getConstant(UINT64_C(0x7ff), MVT::i64)); 2101 SDValue Ne = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2102 And2, DAG.getConstant(UINT64_C(0), MVT::i64), ISD::SETNE); 2103 SDValue Sel = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ne, Or, Op0); 2104 SDValue Ge = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2105 Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64), 2106 ISD::SETUGE); 2107 SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0); 2108 EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType()); 2109 2110 SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2, 2111 DAG.getConstant(32, SHVT)); 2112 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh); 2113 SDValue Fcvt = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Trunc); 2114 SDValue TwoP32 = 2115 DAG.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), MVT::f64); 2116 SDValue Fmul = DAG.getNode(ISD::FMUL, dl, MVT::f64, TwoP32, Fcvt); 2117 SDValue Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sel2); 2118 SDValue Fcvt2 = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Lo); 2119 SDValue Fadd = DAG.getNode(ISD::FADD, dl, MVT::f64, Fmul, Fcvt2); 2120 return DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Fadd, 2121 DAG.getIntPtrConstant(0)); 2122 } 2123 2124 SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0); 2125 2126 SDValue SignSet = DAG.getSetCC(dl, TLI.getSetCCResultType(Op0.getValueType()), 2127 Op0, DAG.getConstant(0, Op0.getValueType()), 2128 ISD::SETLT); 2129 SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4); 2130 SDValue CstOffset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), 2131 SignSet, Four, Zero); 2132 2133 // If the sign bit of the integer is set, the large number will be treated 2134 // as a negative number. To counteract this, the dynamic code adds an 2135 // offset depending on the data type. 
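// EDITOR SKETCH (not part of the original file): the correction performed
// below, worked through for a 32-bit input purely to illustrate the idea
// (i32 inputs are actually caught by the earlier expansion). The 2^32
// constant corresponds to the 0x4F800000 table entry; assumes <stdint.h>.
#if 0
static double u32_via_signed_sketch(uint32_t bits) {
  double d = (double)(int32_t)bits;   // the unconditionally emitted SINT_TO_FP
  if ((int32_t)bits < 0)              // sign bit set: result came out 2^32 low
    d += 4294967296.0;                // add the selected fudge factor
  return d;
}
#endif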
2136 uint64_t FF; 2137 switch (Op0.getValueType().getSimpleVT().SimpleTy) { 2138 default: assert(0 && "Unsupported integer type!"); 2139 case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float) 2140 case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float) 2141 case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float) 2142 case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float) 2143 } 2144 if (TLI.isLittleEndian()) FF <<= 32; 2145 Constant *FudgeFactor = ConstantInt::get( 2146 Type::getInt64Ty(*DAG.getContext()), FF); 2147 2148 SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy()); 2149 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 2150 CPIdx = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), CPIdx, CstOffset); 2151 Alignment = std::min(Alignment, 4u); 2152 SDValue FudgeInReg; 2153 if (DestVT == MVT::f32) 2154 FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx, 2155 MachinePointerInfo::getConstantPool(), 2156 false, false, Alignment); 2157 else { 2158 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, 2159 DAG.getEntryNode(), CPIdx, 2160 MachinePointerInfo::getConstantPool(), 2161 MVT::f32, false, false, Alignment); 2162 HandleSDNode Handle(Load); 2163 LegalizeOp(Load.getNode()); 2164 FudgeInReg = Handle.getValue(); 2165 } 2166 2167 return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg); 2168} 2169 2170/// PromoteLegalINT_TO_FP - This function is responsible for legalizing a 2171/// *INT_TO_FP operation of the specified operand when the target requests that 2172/// we promote it. At this point, we know that the result and operand types are 2173/// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP 2174/// operation that takes a larger input. 2175SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp, 2176 EVT DestVT, 2177 bool isSigned, 2178 DebugLoc dl) { 2179 // First step, figure out the appropriate *INT_TO_FP operation to use. 2180 EVT NewInTy = LegalOp.getValueType(); 2181 2182 unsigned OpToUse = 0; 2183 2184 // Scan for the appropriate larger type to use. 2185 while (1) { 2186 NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1); 2187 assert(NewInTy.isInteger() && "Ran out of possibilities!"); 2188 2189 // If the target supports SINT_TO_FP of this type, use it. 2190 if (TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, NewInTy)) { 2191 OpToUse = ISD::SINT_TO_FP; 2192 break; 2193 } 2194 if (isSigned) continue; 2195 2196 // If the target supports UINT_TO_FP of this type, use it. 2197 if (TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, NewInTy)) { 2198 OpToUse = ISD::UINT_TO_FP; 2199 break; 2200 } 2201 2202 // Otherwise, try a larger type. 2203 } 2204 2205 // Okay, we found the operation and type to use. Zero extend our input to the 2206 // desired type then run the operation on it. 2207 return DAG.getNode(OpToUse, dl, DestVT, 2208 DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, 2209 dl, NewInTy, LegalOp)); 2210} 2211 2212/// PromoteLegalFP_TO_INT - This function is responsible for legalizing a 2213/// FP_TO_*INT operation of the specified operand when the target requests that 2214/// we promote it. At this point, we know that the result and operand types are 2215/// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT 2216/// operation that returns a larger result. 
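// EDITOR SKETCH (not part of the original file): e.g. a target with no
// f32->u16 conversion but a legal f32->i32 can serve FP_TO_UINT this way;
// hypothetical helper, relying on the FP_TO_UINT requirement that the value
// fits the destination range.
#if 0
static uint16_t f32_to_u16_via_i32_sketch(float f) {
  int32_t wide = (int32_t)f;   // the larger, legal FP_TO_SINT
  return (uint16_t)wide;       // ISD::TRUNCATE back to the requested width
}
#endif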
2217SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp, 2218 EVT DestVT, 2219 bool isSigned, 2220 DebugLoc dl) { 2221 // First step, figure out the appropriate FP_TO*INT operation to use. 2222 EVT NewOutTy = DestVT; 2223 2224 unsigned OpToUse = 0; 2225 2226 // Scan for the appropriate larger type to use. 2227 while (1) { 2228 NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1); 2229 assert(NewOutTy.isInteger() && "Ran out of possibilities!"); 2230 2231 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) { 2232 OpToUse = ISD::FP_TO_SINT; 2233 break; 2234 } 2235 2236 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) { 2237 OpToUse = ISD::FP_TO_UINT; 2238 break; 2239 } 2240 2241 // Otherwise, try a larger type. 2242 } 2243 2244 2245 // Okay, we found the operation and type to use. 2246 SDValue Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp); 2247 2248 // Truncate the result of the extended FP_TO_*INT operation to the desired 2249 // size. 2250 return DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation); 2251} 2252 2253/// ExpandBSWAP - Open code the operations for BSWAP of the specified operation. 2254/// 2255SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) { 2256 EVT VT = Op.getValueType(); 2257 EVT SHVT = TLI.getShiftAmountTy(VT); 2258 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; 2259 switch (VT.getSimpleVT().SimpleTy) { 2260 default: assert(0 && "Unhandled Expand type in BSWAP!"); 2261 case MVT::i16: 2262 Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2263 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2264 return DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2265 case MVT::i32: 2266 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2267 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2268 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2269 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2270 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(0xFF0000, VT)); 2271 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, VT)); 2272 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2273 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2274 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 2275 case MVT::i64: 2276 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2277 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2278 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2279 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2280 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2281 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2282 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2283 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2284 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, DAG.getConstant(255ULL<<48, VT)); 2285 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, DAG.getConstant(255ULL<<40, VT)); 2286 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, DAG.getConstant(255ULL<<32, VT)); 2287 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, DAG.getConstant(255ULL<<24, VT)); 2288 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(255ULL<<16, VT)); 2289 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(255ULL<<8 , VT)); 2290 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7); 2291 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, 
Tmp5); 2292 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2293 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2294 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6); 2295 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 2296 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4); 2297 } 2298} 2299 2300/// SplatByte - Distribute ByteVal over NumBits bits. 2301// FIXME: Move this helper to a common place. 2302static APInt SplatByte(unsigned NumBits, uint8_t ByteVal) { 2303 APInt Val = APInt(NumBits, ByteVal); 2304 unsigned Shift = 8; 2305 for (unsigned i = NumBits; i > 8; i >>= 1) { 2306 Val = (Val << Shift) | Val; 2307 Shift <<= 1; 2308 } 2309 return Val; 2310} 2311 2312/// ExpandBitCount - Expand the specified bitcount instruction into operations. 2313/// 2314SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op, 2315 DebugLoc dl) { 2316 switch (Opc) { 2317 default: assert(0 && "Cannot expand this yet!"); 2318 case ISD::CTPOP: { 2319 EVT VT = Op.getValueType(); 2320 EVT ShVT = TLI.getShiftAmountTy(VT); 2321 unsigned Len = VT.getSizeInBits(); 2322 2323 assert(VT.isInteger() && Len <= 128 && Len % 8 == 0 && 2324 "CTPOP not implemented for this type."); 2325 2326 // This is the "best" algorithm from 2327 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 2328 2329 SDValue Mask55 = DAG.getConstant(SplatByte(Len, 0x55), VT); 2330 SDValue Mask33 = DAG.getConstant(SplatByte(Len, 0x33), VT); 2331 SDValue Mask0F = DAG.getConstant(SplatByte(Len, 0x0F), VT); 2332 SDValue Mask01 = DAG.getConstant(SplatByte(Len, 0x01), VT); 2333 2334 // v = v - ((v >> 1) & 0x55555555...) 2335 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 2336 DAG.getNode(ISD::AND, dl, VT, 2337 DAG.getNode(ISD::SRL, dl, VT, Op, 2338 DAG.getConstant(1, ShVT)), 2339 Mask55)); 2340 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 2341 Op = DAG.getNode(ISD::ADD, dl, VT, 2342 DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 2343 DAG.getNode(ISD::AND, dl, VT, 2344 DAG.getNode(ISD::SRL, dl, VT, Op, 2345 DAG.getConstant(2, ShVT)), 2346 Mask33)); 2347 // v = (v + (v >> 4)) & 0x0F0F0F0F... 2348 Op = DAG.getNode(ISD::AND, dl, VT, 2349 DAG.getNode(ISD::ADD, dl, VT, Op, 2350 DAG.getNode(ISD::SRL, dl, VT, Op, 2351 DAG.getConstant(4, ShVT))), 2352 Mask0F); 2353 // v = (v * 0x01010101...) >> (Len - 8) 2354 Op = DAG.getNode(ISD::SRL, dl, VT, 2355 DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 2356 DAG.getConstant(Len - 8, ShVT)); 2357 2358 return Op; 2359 } 2360 case ISD::CTLZ: { 2361 // for now, we do this: 2362 // x = x | (x >> 1); 2363 // x = x | (x >> 2); 2364 // ... 
2365 // x = x | (x >>16); 2366 // x = x | (x >>32); // for 64-bit input 2367 // return popcount(~x); 2368 // 2369 // but see also: http://www.hackersdelight.org/HDcode/nlz.cc 2370 EVT VT = Op.getValueType(); 2371 EVT ShVT = TLI.getShiftAmountTy(VT); 2372 unsigned len = VT.getSizeInBits(); 2373 for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { 2374 SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT); 2375 Op = DAG.getNode(ISD::OR, dl, VT, Op, 2376 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3)); 2377 } 2378 Op = DAG.getNOT(dl, Op, VT); 2379 return DAG.getNode(ISD::CTPOP, dl, VT, Op); 2380 } 2381 case ISD::CTTZ: { 2382 // for now, we use: { return popcount(~x & (x - 1)); } 2383 // unless the target has ctlz but not ctpop, in which case we use: 2384 // { return 32 - nlz(~x & (x-1)); } 2385 // see also http://www.hackersdelight.org/HDcode/ntz.cc 2386 EVT VT = Op.getValueType(); 2387 SDValue Tmp3 = DAG.getNode(ISD::AND, dl, VT, 2388 DAG.getNOT(dl, Op, VT), 2389 DAG.getNode(ISD::SUB, dl, VT, Op, 2390 DAG.getConstant(1, VT))); 2391 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 2392 if (!TLI.isOperationLegalOrCustom(ISD::CTPOP, VT) && 2393 TLI.isOperationLegalOrCustom(ISD::CTLZ, VT)) 2394 return DAG.getNode(ISD::SUB, dl, VT, 2395 DAG.getConstant(VT.getSizeInBits(), VT), 2396 DAG.getNode(ISD::CTLZ, dl, VT, Tmp3)); 2397 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp3); 2398 } 2399 } 2400} 2401 2402std::pair <SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) { 2403 unsigned Opc = Node->getOpcode(); 2404 MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT(); 2405 RTLIB::Libcall LC; 2406 2407 switch (Opc) { 2408 default: 2409 llvm_unreachable("Unhandled atomic intrinsic Expand!"); 2410 break; 2411 case ISD::ATOMIC_SWAP: 2412 switch (VT.SimpleTy) { 2413 default: llvm_unreachable("Unexpected value type for atomic!"); 2414 case MVT::i8: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_1; break; 2415 case MVT::i16: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_2; break; 2416 case MVT::i32: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_4; break; 2417 case MVT::i64: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_8; break; 2418 } 2419 break; 2420 case ISD::ATOMIC_CMP_SWAP: 2421 switch (VT.SimpleTy) { 2422 default: llvm_unreachable("Unexpected value type for atomic!"); 2423 case MVT::i8: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1; break; 2424 case MVT::i16: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2; break; 2425 case MVT::i32: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4; break; 2426 case MVT::i64: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8; break; 2427 } 2428 break; 2429 case ISD::ATOMIC_LOAD_ADD: 2430 switch (VT.SimpleTy) { 2431 default: llvm_unreachable("Unexpected value type for atomic!"); 2432 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_ADD_1; break; 2433 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_ADD_2; break; 2434 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_ADD_4; break; 2435 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_ADD_8; break; 2436 } 2437 break; 2438 case ISD::ATOMIC_LOAD_SUB: 2439 switch (VT.SimpleTy) { 2440 default: llvm_unreachable("Unexpected value type for atomic!"); 2441 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_SUB_1; break; 2442 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_SUB_2; break; 2443 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_SUB_4; break; 2444 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_SUB_8; break; 2445 } 2446 break; 2447 case ISD::ATOMIC_LOAD_AND: 2448 switch (VT.SimpleTy) { 2449 default: llvm_unreachable("Unexpected value type for atomic!"); 2450 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_AND_1; break; 2451 case MVT::i16: 
LC = RTLIB::SYNC_FETCH_AND_AND_2; break; 2452 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_AND_4; break; 2453 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_AND_8; break; 2454 } 2455 break; 2456 case ISD::ATOMIC_LOAD_OR: 2457 switch (VT.SimpleTy) { 2458 default: llvm_unreachable("Unexpected value type for atomic!"); 2459 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_OR_1; break; 2460 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_OR_2; break; 2461 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_OR_4; break; 2462 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_OR_8; break; 2463 } 2464 break; 2465 case ISD::ATOMIC_LOAD_XOR: 2466 switch (VT.SimpleTy) { 2467 default: llvm_unreachable("Unexpected value type for atomic!"); 2468 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_XOR_1; break; 2469 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_XOR_2; break; 2470 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_XOR_4; break; 2471 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_XOR_8; break; 2472 } 2473 break; 2474 case ISD::ATOMIC_LOAD_NAND: 2475 switch (VT.SimpleTy) { 2476 default: llvm_unreachable("Unexpected value type for atomic!"); 2477 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_NAND_1; break; 2478 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_NAND_2; break; 2479 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_NAND_4; break; 2480 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_NAND_8; break; 2481 } 2482 break; 2483 } 2484 2485 return ExpandChainLibCall(LC, Node, false); 2486} 2487 2488void SelectionDAGLegalize::ExpandNode(SDNode *Node) { 2489 SmallVector<SDValue, 8> Results; 2490 DebugLoc dl = Node->getDebugLoc(); 2491 SDValue Tmp1, Tmp2, Tmp3, Tmp4; 2492 switch (Node->getOpcode()) { 2493 case ISD::CTPOP: 2494 case ISD::CTLZ: 2495 case ISD::CTTZ: 2496 Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl); 2497 Results.push_back(Tmp1); 2498 break; 2499 case ISD::BSWAP: 2500 Results.push_back(ExpandBSWAP(Node->getOperand(0), dl)); 2501 break; 2502 case ISD::FRAMEADDR: 2503 case ISD::RETURNADDR: 2504 case ISD::FRAME_TO_ARGS_OFFSET: 2505 Results.push_back(DAG.getConstant(0, Node->getValueType(0))); 2506 break; 2507 case ISD::FLT_ROUNDS_: 2508 Results.push_back(DAG.getConstant(1, Node->getValueType(0))); 2509 break; 2510 case ISD::EH_RETURN: 2511 case ISD::EH_LABEL: 2512 case ISD::PREFETCH: 2513 case ISD::VAEND: 2514 case ISD::EH_SJLJ_LONGJMP: 2515 case ISD::EH_SJLJ_DISPATCHSETUP: 2516 // If the target didn't expand these, there's nothing to do, so just 2517 // preserve the chain and be done. 2518 Results.push_back(Node->getOperand(0)); 2519 break; 2520 case ISD::EH_SJLJ_SETJMP: 2521 // If the target didn't expand this, just return 'zero' and preserve the 2522 // chain. 2523 Results.push_back(DAG.getConstant(0, MVT::i32)); 2524 Results.push_back(Node->getOperand(0)); 2525 break; 2526 case ISD::ATOMIC_FENCE: 2527 case ISD::MEMBARRIER: { 2528 // If the target didn't lower this, lower it to '__sync_synchronize()' call 2529 // FIXME: handle "fence singlethread" more efficiently. 2530 TargetLowering::ArgListTy Args; 2531 std::pair<SDValue, SDValue> CallResult = 2532 TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()), 2533 false, false, false, false, 0, CallingConv::C, 2534 /*isTailCall=*/false, 2535 /*isReturnValueUsed=*/true, 2536 DAG.getExternalSymbol("__sync_synchronize", 2537 TLI.getPointerTy()), 2538 Args, DAG, dl); 2539 Results.push_back(CallResult.second); 2540 break; 2541 } 2542 case ISD::ATOMIC_LOAD: { 2543 // There is no libcall for atomic load; fake it with ATOMIC_CMP_SWAP. 
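// EDITOR SKETCH (not part of the original file): the same trick expressed
// with the __sync builtin that the SYNC_VAL_COMPARE_AND_SWAP libcalls
// correspond to.
#if 0
static int atomic_load_sketch(int *p) {
  // A compare-and-swap with expected == new == 0 either fails or "replaces"
  // 0 with 0, so *p is never changed, yet the current value comes back
  // atomically -- which is all an atomic load needs.
  return __sync_val_compare_and_swap(p, 0, 0);
}
#endif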
2544 SDValue Zero = DAG.getConstant(0, Node->getValueType(0)); 2545 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, 2546 cast<AtomicSDNode>(Node)->getMemoryVT(), 2547 Node->getOperand(0), 2548 Node->getOperand(1), Zero, Zero, 2549 cast<AtomicSDNode>(Node)->getMemOperand(), 2550 cast<AtomicSDNode>(Node)->getOrdering(), 2551 cast<AtomicSDNode>(Node)->getSynchScope()); 2552 Results.push_back(Swap.getValue(0)); 2553 Results.push_back(Swap.getValue(1)); 2554 break; 2555 } 2556 case ISD::ATOMIC_STORE: { 2557 // There is no libcall for atomic store; fake it with ATOMIC_SWAP. 2558 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, 2559 cast<AtomicSDNode>(Node)->getMemoryVT(), 2560 Node->getOperand(0), 2561 Node->getOperand(1), Node->getOperand(2), 2562 cast<AtomicSDNode>(Node)->getMemOperand(), 2563 cast<AtomicSDNode>(Node)->getOrdering(), 2564 cast<AtomicSDNode>(Node)->getSynchScope()); 2565 Results.push_back(Swap.getValue(1)); 2566 break; 2567 } 2568 // By default, atomic intrinsics are marked Legal and lowered. Targets 2569 // which don't support them directly, however, may want libcalls, in which 2570 // case they mark them Expand, and we get here. 2571 case ISD::ATOMIC_SWAP: 2572 case ISD::ATOMIC_LOAD_ADD: 2573 case ISD::ATOMIC_LOAD_SUB: 2574 case ISD::ATOMIC_LOAD_AND: 2575 case ISD::ATOMIC_LOAD_OR: 2576 case ISD::ATOMIC_LOAD_XOR: 2577 case ISD::ATOMIC_LOAD_NAND: 2578 case ISD::ATOMIC_LOAD_MIN: 2579 case ISD::ATOMIC_LOAD_MAX: 2580 case ISD::ATOMIC_LOAD_UMIN: 2581 case ISD::ATOMIC_LOAD_UMAX: 2582 case ISD::ATOMIC_CMP_SWAP: { 2583 std::pair<SDValue, SDValue> Tmp = ExpandAtomic(Node); 2584 Results.push_back(Tmp.first); 2585 Results.push_back(Tmp.second); 2586 break; 2587 } 2588 case ISD::DYNAMIC_STACKALLOC: 2589 ExpandDYNAMIC_STACKALLOC(Node, Results); 2590 break; 2591 case ISD::MERGE_VALUES: 2592 for (unsigned i = 0; i < Node->getNumValues(); i++) 2593 Results.push_back(Node->getOperand(i)); 2594 break; 2595 case ISD::UNDEF: { 2596 EVT VT = Node->getValueType(0); 2597 if (VT.isInteger()) 2598 Results.push_back(DAG.getConstant(0, VT)); 2599 else { 2600 assert(VT.isFloatingPoint() && "Unknown value type!"); 2601 Results.push_back(DAG.getConstantFP(0, VT)); 2602 } 2603 break; 2604 } 2605 case ISD::TRAP: { 2606 // If this operation is not supported, lower it to 'abort()' call 2607 TargetLowering::ArgListTy Args; 2608 std::pair<SDValue, SDValue> CallResult = 2609 TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()), 2610 false, false, false, false, 0, CallingConv::C, 2611 /*isTailCall=*/false, 2612 /*isReturnValueUsed=*/true, 2613 DAG.getExternalSymbol("abort", TLI.getPointerTy()), 2614 Args, DAG, dl); 2615 Results.push_back(CallResult.second); 2616 break; 2617 } 2618 case ISD::FP_ROUND: 2619 case ISD::BITCAST: 2620 Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0), 2621 Node->getValueType(0), dl); 2622 Results.push_back(Tmp1); 2623 break; 2624 case ISD::FP_EXTEND: 2625 Tmp1 = EmitStackConvert(Node->getOperand(0), 2626 Node->getOperand(0).getValueType(), 2627 Node->getValueType(0), dl); 2628 Results.push_back(Tmp1); 2629 break; 2630 case ISD::SIGN_EXTEND_INREG: { 2631 // NOTE: we could fall back on load/store here too for targets without 2632 // SAR. However, it is doubtful that any exist. 
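// EDITOR SKETCH (not part of the original file): the SHL/SRA pair built
// below, shown for sign-extending the low 8 bits held in an i32; assumes the
// arithmetic right shift on signed values that ISD::SRA models.
#if 0
static int32_t sext_in_reg_i8_sketch(int32_t x) {
  return (int32_t)((uint32_t)x << 24) >> 24;   // BitsDiff == 32 - 8 == 24
}
#endif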
2633 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 2634 EVT VT = Node->getValueType(0); 2635 EVT ShiftAmountTy = TLI.getShiftAmountTy(VT); 2636 if (VT.isVector()) 2637 ShiftAmountTy = VT; 2638 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 2639 ExtraVT.getScalarType().getSizeInBits(); 2640 SDValue ShiftCst = DAG.getConstant(BitsDiff, ShiftAmountTy); 2641 Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0), 2642 Node->getOperand(0), ShiftCst); 2643 Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst); 2644 Results.push_back(Tmp1); 2645 break; 2646 } 2647 case ISD::FP_ROUND_INREG: { 2648 // The only way we can lower this is to turn it into a TRUNCSTORE, 2649 // EXTLOAD pair, targeting a temporary location (a stack slot). 2650 2651 // NOTE: there is a choice here between constantly creating new stack 2652 // slots and always reusing the same one. We currently always create 2653 // new ones, as reuse may inhibit scheduling. 2654 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 2655 Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT, 2656 Node->getValueType(0), dl); 2657 Results.push_back(Tmp1); 2658 break; 2659 } 2660 case ISD::SINT_TO_FP: 2661 case ISD::UINT_TO_FP: 2662 Tmp1 = ExpandLegalINT_TO_FP(Node->getOpcode() == ISD::SINT_TO_FP, 2663 Node->getOperand(0), Node->getValueType(0), dl); 2664 Results.push_back(Tmp1); 2665 break; 2666 case ISD::FP_TO_UINT: { 2667 SDValue True, False; 2668 EVT VT = Node->getOperand(0).getValueType(); 2669 EVT NVT = Node->getValueType(0); 2670 APFloat apf(APInt::getNullValue(VT.getSizeInBits())); 2671 APInt x = APInt::getSignBit(NVT.getSizeInBits()); 2672 (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven); 2673 Tmp1 = DAG.getConstantFP(apf, VT); 2674 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), 2675 Node->getOperand(0), 2676 Tmp1, ISD::SETLT); 2677 True = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, Node->getOperand(0)); 2678 False = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, 2679 DAG.getNode(ISD::FSUB, dl, VT, 2680 Node->getOperand(0), Tmp1)); 2681 False = DAG.getNode(ISD::XOR, dl, NVT, False, 2682 DAG.getConstant(x, NVT)); 2683 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, True, False); 2684 Results.push_back(Tmp1); 2685 break; 2686 } 2687 case ISD::VAARG: { 2688 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2689 EVT VT = Node->getValueType(0); 2690 Tmp1 = Node->getOperand(0); 2691 Tmp2 = Node->getOperand(1); 2692 unsigned Align = Node->getConstantOperandVal(3); 2693 2694 SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, 2695 MachinePointerInfo(V), false, false, 0); 2696 SDValue VAList = VAListLoad; 2697 2698 if (Align > TLI.getMinStackArgumentAlignment()) { 2699 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); 2700 2701 VAList = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList, 2702 DAG.getConstant(Align - 1, 2703 TLI.getPointerTy())); 2704 2705 VAList = DAG.getNode(ISD::AND, dl, TLI.getPointerTy(), VAList, 2706 DAG.getConstant(-(int64_t)Align, 2707 TLI.getPointerTy())); 2708 } 2709 2710 // Increment the pointer, VAList, to the next vaarg 2711 Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList, 2712 DAG.getConstant(TLI.getTargetData()-> 2713 getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())), 2714 TLI.getPointerTy())); 2715 // Store the incremented VAList to the legalized pointer 2716 Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2, 2717 MachinePointerInfo(V), false, false, 0); 2718 // Load 
the actual argument out of the pointer VAList 2719 Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(), 2720 false, false, 0)); 2721 Results.push_back(Results[0].getValue(1)); 2722 break; 2723 } 2724 case ISD::VACOPY: { 2725 // This defaults to loading a pointer from the input and storing it to the 2726 // output, returning the chain. 2727 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 2728 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 2729 Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0), 2730 Node->getOperand(2), MachinePointerInfo(VS), 2731 false, false, 0); 2732 Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 2733 MachinePointerInfo(VD), false, false, 0); 2734 Results.push_back(Tmp1); 2735 break; 2736 } 2737 case ISD::EXTRACT_VECTOR_ELT: 2738 if (Node->getOperand(0).getValueType().getVectorNumElements() == 1) 2739 // This must be an access of the only element. Return it. 2740 Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), 2741 Node->getOperand(0)); 2742 else 2743 Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0)); 2744 Results.push_back(Tmp1); 2745 break; 2746 case ISD::EXTRACT_SUBVECTOR: 2747 Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0))); 2748 break; 2749 case ISD::INSERT_SUBVECTOR: 2750 Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0))); 2751 break; 2752 case ISD::CONCAT_VECTORS: { 2753 Results.push_back(ExpandVectorBuildThroughStack(Node)); 2754 break; 2755 } 2756 case ISD::SCALAR_TO_VECTOR: 2757 Results.push_back(ExpandSCALAR_TO_VECTOR(Node)); 2758 break; 2759 case ISD::INSERT_VECTOR_ELT: 2760 Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0), 2761 Node->getOperand(1), 2762 Node->getOperand(2), dl)); 2763 break; 2764 case ISD::VECTOR_SHUFFLE: { 2765 SmallVector<int, 8> Mask; 2766 cast<ShuffleVectorSDNode>(Node)->getMask(Mask); 2767 2768 EVT VT = Node->getValueType(0); 2769 EVT EltVT = VT.getVectorElementType(); 2770 if (!TLI.isTypeLegal(EltVT)) 2771 EltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT); 2772 unsigned NumElems = VT.getVectorNumElements(); 2773 SmallVector<SDValue, 8> Ops; 2774 for (unsigned i = 0; i != NumElems; ++i) { 2775 if (Mask[i] < 0) { 2776 Ops.push_back(DAG.getUNDEF(EltVT)); 2777 continue; 2778 } 2779 unsigned Idx = Mask[i]; 2780 if (Idx < NumElems) 2781 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 2782 Node->getOperand(0), 2783 DAG.getIntPtrConstant(Idx))); 2784 else 2785 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 2786 Node->getOperand(1), 2787 DAG.getIntPtrConstant(Idx - NumElems))); 2788 } 2789 Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size()); 2790 Results.push_back(Tmp1); 2791 break; 2792 } 2793 case ISD::EXTRACT_ELEMENT: { 2794 EVT OpTy = Node->getOperand(0).getValueType(); 2795 if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) { 2796 // 1 -> Hi 2797 Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0), 2798 DAG.getConstant(OpTy.getSizeInBits()/2, 2799 TLI.getShiftAmountTy(Node->getOperand(0).getValueType()))); 2800 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1); 2801 } else { 2802 // 0 -> Lo 2803 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), 2804 Node->getOperand(0)); 2805 } 2806 Results.push_back(Tmp1); 2807 break; 2808 } 2809 case ISD::STACKSAVE: 2810 // Expand to CopyFromReg if the target set 2811 // StackPointerRegisterToSaveRestore. 
2812 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) { 2813 Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP, 2814 Node->getValueType(0))); 2815 Results.push_back(Results[0].getValue(1)); 2816 } else { 2817 Results.push_back(DAG.getUNDEF(Node->getValueType(0))); 2818 Results.push_back(Node->getOperand(0)); 2819 } 2820 break; 2821 case ISD::STACKRESTORE: 2822 // Expand to CopyToReg if the target set 2823 // StackPointerRegisterToSaveRestore. 2824 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) { 2825 Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP, 2826 Node->getOperand(1))); 2827 } else { 2828 Results.push_back(Node->getOperand(0)); 2829 } 2830 break; 2831 case ISD::FCOPYSIGN: 2832 Results.push_back(ExpandFCOPYSIGN(Node)); 2833 break; 2834 case ISD::FNEG: 2835 // Expand Y = FNEG(X) -> Y = SUB -0.0, X 2836 Tmp1 = DAG.getConstantFP(-0.0, Node->getValueType(0)); 2837 Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1, 2838 Node->getOperand(0)); 2839 Results.push_back(Tmp1); 2840 break; 2841 case ISD::FABS: { 2842 // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X). 2843 EVT VT = Node->getValueType(0); 2844 Tmp1 = Node->getOperand(0); 2845 Tmp2 = DAG.getConstantFP(0.0, VT); 2846 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(Tmp1.getValueType()), 2847 Tmp1, Tmp2, ISD::SETUGT); 2848 Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1); 2849 Tmp1 = DAG.getNode(ISD::SELECT, dl, VT, Tmp2, Tmp1, Tmp3); 2850 Results.push_back(Tmp1); 2851 break; 2852 } 2853 case ISD::FSQRT: 2854 Results.push_back(ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64, 2855 RTLIB::SQRT_F80, RTLIB::SQRT_PPCF128)); 2856 break; 2857 case ISD::FSIN: 2858 Results.push_back(ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64, 2859 RTLIB::SIN_F80, RTLIB::SIN_PPCF128)); 2860 break; 2861 case ISD::FCOS: 2862 Results.push_back(ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64, 2863 RTLIB::COS_F80, RTLIB::COS_PPCF128)); 2864 break; 2865 case ISD::FLOG: 2866 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64, 2867 RTLIB::LOG_F80, RTLIB::LOG_PPCF128)); 2868 break; 2869 case ISD::FLOG2: 2870 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64, 2871 RTLIB::LOG2_F80, RTLIB::LOG2_PPCF128)); 2872 break; 2873 case ISD::FLOG10: 2874 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64, 2875 RTLIB::LOG10_F80, RTLIB::LOG10_PPCF128)); 2876 break; 2877 case ISD::FEXP: 2878 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64, 2879 RTLIB::EXP_F80, RTLIB::EXP_PPCF128)); 2880 break; 2881 case ISD::FEXP2: 2882 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64, 2883 RTLIB::EXP2_F80, RTLIB::EXP2_PPCF128)); 2884 break; 2885 case ISD::FTRUNC: 2886 Results.push_back(ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64, 2887 RTLIB::TRUNC_F80, RTLIB::TRUNC_PPCF128)); 2888 break; 2889 case ISD::FFLOOR: 2890 Results.push_back(ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64, 2891 RTLIB::FLOOR_F80, RTLIB::FLOOR_PPCF128)); 2892 break; 2893 case ISD::FCEIL: 2894 Results.push_back(ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64, 2895 RTLIB::CEIL_F80, RTLIB::CEIL_PPCF128)); 2896 break; 2897 case ISD::FRINT: 2898 Results.push_back(ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64, 2899 RTLIB::RINT_F80, RTLIB::RINT_PPCF128)); 2900 break; 2901 case ISD::FNEARBYINT: 2902 Results.push_back(ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32, 2903 RTLIB::NEARBYINT_F64, 2904 
                                         RTLIB::NEARBYINT_F80,
                                         RTLIB::NEARBYINT_PPCF128));
    break;
  case ISD::FPOWI:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64,
                                      RTLIB::POWI_F80, RTLIB::POWI_PPCF128));
    break;
  case ISD::FPOW:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64,
                                      RTLIB::POW_F80, RTLIB::POW_PPCF128));
    break;
  case ISD::FDIV:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64,
                                      RTLIB::DIV_F80, RTLIB::DIV_PPCF128));
    break;
  case ISD::FREM:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64,
                                      RTLIB::REM_F80, RTLIB::REM_PPCF128));
    break;
  case ISD::FMA:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::FMA_F32, RTLIB::FMA_F64,
                                      RTLIB::FMA_F80, RTLIB::FMA_PPCF128));
    break;
  case ISD::FP16_TO_FP32:
    Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false));
    break;
  case ISD::FP32_TO_FP16:
    Results.push_back(ExpandLibCall(RTLIB::FPROUND_F32_F16, Node, false));
    break;
  case ISD::ConstantFP: {
    ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
    // Check to see if this FP immediate is already legal.
    // If it is not, expand it into a load from the constant pool.
    if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0)))
      Results.push_back(ExpandConstantFP(CFP, true));
    break;
  }
  case ISD::EHSELECTION: {
    unsigned Reg = TLI.getExceptionSelectorRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(1), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::EXCEPTIONADDR: {
    unsigned Reg = TLI.getExceptionAddressRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::SUB: {
    EVT VT = Node->getValueType(0);
    assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
           TLI.isOperationLegalOrCustom(ISD::XOR, VT) &&
           "Don't know how to expand this subtraction!");
    Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1),
               DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT));
    Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, VT));
    Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1));
    break;
  }
  case ISD::UREM:
  case ISD::SREM: {
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    bool isSigned = Node->getOpcode() == ISD::SREM;
    unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
    unsigned DivRemOpc = isSigned ?
ISD::SDIVREM : ISD::UDIVREM; 2975 Tmp2 = Node->getOperand(0); 2976 Tmp3 = Node->getOperand(1); 2977 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) || 2978 (isDivRemLibcallAvailable(Node, isSigned, TLI) && 2979 UseDivRem(Node, isSigned, false))) { 2980 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1); 2981 } else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) { 2982 // X % Y -> X-X/Y*Y 2983 Tmp1 = DAG.getNode(DivOpc, dl, VT, Tmp2, Tmp3); 2984 Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Tmp3); 2985 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp1); 2986 } else if (isSigned) 2987 Tmp1 = ExpandIntLibCall(Node, true, 2988 RTLIB::SREM_I8, 2989 RTLIB::SREM_I16, RTLIB::SREM_I32, 2990 RTLIB::SREM_I64, RTLIB::SREM_I128); 2991 else 2992 Tmp1 = ExpandIntLibCall(Node, false, 2993 RTLIB::UREM_I8, 2994 RTLIB::UREM_I16, RTLIB::UREM_I32, 2995 RTLIB::UREM_I64, RTLIB::UREM_I128); 2996 Results.push_back(Tmp1); 2997 break; 2998 } 2999 case ISD::UDIV: 3000 case ISD::SDIV: { 3001 bool isSigned = Node->getOpcode() == ISD::SDIV; 3002 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM; 3003 EVT VT = Node->getValueType(0); 3004 SDVTList VTs = DAG.getVTList(VT, VT); 3005 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) || 3006 (isDivRemLibcallAvailable(Node, isSigned, TLI) && 3007 UseDivRem(Node, isSigned, true))) 3008 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0), 3009 Node->getOperand(1)); 3010 else if (isSigned) 3011 Tmp1 = ExpandIntLibCall(Node, true, 3012 RTLIB::SDIV_I8, 3013 RTLIB::SDIV_I16, RTLIB::SDIV_I32, 3014 RTLIB::SDIV_I64, RTLIB::SDIV_I128); 3015 else 3016 Tmp1 = ExpandIntLibCall(Node, false, 3017 RTLIB::UDIV_I8, 3018 RTLIB::UDIV_I16, RTLIB::UDIV_I32, 3019 RTLIB::UDIV_I64, RTLIB::UDIV_I128); 3020 Results.push_back(Tmp1); 3021 break; 3022 } 3023 case ISD::MULHU: 3024 case ISD::MULHS: { 3025 unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI : 3026 ISD::SMUL_LOHI; 3027 EVT VT = Node->getValueType(0); 3028 SDVTList VTs = DAG.getVTList(VT, VT); 3029 assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) && 3030 "If this wasn't legal, it shouldn't have been created!"); 3031 Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0), 3032 Node->getOperand(1)); 3033 Results.push_back(Tmp1.getValue(1)); 3034 break; 3035 } 3036 case ISD::SDIVREM: 3037 case ISD::UDIVREM: 3038 // Expand into divrem libcall 3039 ExpandDivRemLibCall(Node, Results); 3040 break; 3041 case ISD::MUL: { 3042 EVT VT = Node->getValueType(0); 3043 SDVTList VTs = DAG.getVTList(VT, VT); 3044 // See if multiply or divide can be lowered using two-result operations. 3045 // We just need the low half of the multiply; try both the signed 3046 // and unsigned forms. If the target supports both SMUL_LOHI and 3047 // UMUL_LOHI, form a preference by checking which forms of plain 3048 // MULH it supports. 
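    // (For example, a target that supports SMUL_LOHI but expands MULHS is
    //  assumed to prefer the signed two-result form, so it is chosen first
    //  below; when both MULH forms are available, whichever *MUL_LOHI exists
    //  is still fine since only the low half of the result is used.)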
3049 bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT); 3050 bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT); 3051 bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT); 3052 bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT); 3053 unsigned OpToUse = 0; 3054 if (HasSMUL_LOHI && !HasMULHS) { 3055 OpToUse = ISD::SMUL_LOHI; 3056 } else if (HasUMUL_LOHI && !HasMULHU) { 3057 OpToUse = ISD::UMUL_LOHI; 3058 } else if (HasSMUL_LOHI) { 3059 OpToUse = ISD::SMUL_LOHI; 3060 } else if (HasUMUL_LOHI) { 3061 OpToUse = ISD::UMUL_LOHI; 3062 } 3063 if (OpToUse) { 3064 Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0), 3065 Node->getOperand(1))); 3066 break; 3067 } 3068 Tmp1 = ExpandIntLibCall(Node, false, 3069 RTLIB::MUL_I8, 3070 RTLIB::MUL_I16, RTLIB::MUL_I32, 3071 RTLIB::MUL_I64, RTLIB::MUL_I128); 3072 Results.push_back(Tmp1); 3073 break; 3074 } 3075 case ISD::SADDO: 3076 case ISD::SSUBO: { 3077 SDValue LHS = Node->getOperand(0); 3078 SDValue RHS = Node->getOperand(1); 3079 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ? 3080 ISD::ADD : ISD::SUB, dl, LHS.getValueType(), 3081 LHS, RHS); 3082 Results.push_back(Sum); 3083 EVT OType = Node->getValueType(1); 3084 3085 SDValue Zero = DAG.getConstant(0, LHS.getValueType()); 3086 3087 // LHSSign -> LHS >= 0 3088 // RHSSign -> RHS >= 0 3089 // SumSign -> Sum >= 0 3090 // 3091 // Add: 3092 // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign) 3093 // Sub: 3094 // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign) 3095 // 3096 SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE); 3097 SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE); 3098 SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign, 3099 Node->getOpcode() == ISD::SADDO ? 3100 ISD::SETEQ : ISD::SETNE); 3101 3102 SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE); 3103 SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE); 3104 3105 SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE); 3106 Results.push_back(Cmp); 3107 break; 3108 } 3109 case ISD::UADDO: 3110 case ISD::USUBO: { 3111 SDValue LHS = Node->getOperand(0); 3112 SDValue RHS = Node->getOperand(1); 3113 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::UADDO ? 3114 ISD::ADD : ISD::SUB, dl, LHS.getValueType(), 3115 LHS, RHS); 3116 Results.push_back(Sum); 3117 Results.push_back(DAG.getSetCC(dl, Node->getValueType(1), Sum, LHS, 3118 Node->getOpcode () == ISD::UADDO ? 
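                                   // Unsigned overflow check: the ADD wrapped
                                   // iff Sum <u LHS, and the SUB borrowed iff
                                   // Sum >u LHS.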
                                   ISD::SETULT : ISD::SETUGT));
    break;
  }
  case ISD::UMULO:
  case ISD::SMULO: {
    EVT VT = Node->getValueType(0);
    EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue BottomHalf;
    SDValue TopHalf;
    static const unsigned Ops[2][3] =
        { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
          { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
    bool isSigned = Node->getOpcode() == ISD::SMULO;
    if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
      BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
      TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
    } else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
      BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
                               RHS);
      TopHalf = BottomHalf.getValue(1);
    } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(),
                                                 VT.getSizeInBits() * 2))) {
      LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
      RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
      Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
      BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
                               DAG.getIntPtrConstant(0));
      TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
                            DAG.getIntPtrConstant(1));
    } else {
      // We can fall back to a libcall with an illegal type for the MUL if we
      // have a libcall big enough.
      // Also, we can fall back to a division in some cases, but that's a big
      // performance hit in the general case.
      RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
      if (WideVT == MVT::i16)
        LC = RTLIB::MUL_I16;
      else if (WideVT == MVT::i32)
        LC = RTLIB::MUL_I32;
      else if (WideVT == MVT::i64)
        LC = RTLIB::MUL_I64;
      else if (WideVT == MVT::i128)
        LC = RTLIB::MUL_I128;
      assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");

      // The high part is obtained by SRA'ing all but one of the bits of the
      // low part.
      unsigned LoSize = VT.getSizeInBits();
      SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS,
                                 DAG.getConstant(LoSize-1, TLI.getPointerTy()));
      SDValue HiRHS = DAG.getNode(ISD::SRA, dl, VT, RHS,
                                 DAG.getConstant(LoSize-1, TLI.getPointerTy()));

      // Here we're passing the 2 arguments explicitly as 4 arguments that are
      // pre-lowered to the correct types. This all depends upon WideVT not
      // being a legal type for the architecture and thus having to be split
      // into two arguments.
      SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
      SDValue Ret = ExpandLibCall(LC, WideVT, Args, 4, isSigned, dl);
      BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
                               DAG.getIntPtrConstant(0));
      TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
                            DAG.getIntPtrConstant(1));
      // Ret is a node with an illegal type. Because such things are not
      // generally permitted during this phase of legalization, delete the
      // node. The above EXTRACT_ELEMENT nodes should have been folded.
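      // (If they had not been folded away, Ret would still have uses here and
      //  DeleteNode would assert.)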
3187 DAG.DeleteNode(Ret.getNode()); 3188 } 3189 3190 if (isSigned) { 3191 Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, 3192 TLI.getShiftAmountTy(BottomHalf.getValueType())); 3193 Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1); 3194 TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1, 3195 ISD::SETNE); 3196 } else { 3197 TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, 3198 DAG.getConstant(0, VT), ISD::SETNE); 3199 } 3200 Results.push_back(BottomHalf); 3201 Results.push_back(TopHalf); 3202 break; 3203 } 3204 case ISD::BUILD_PAIR: { 3205 EVT PairTy = Node->getValueType(0); 3206 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0)); 3207 Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1)); 3208 Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2, 3209 DAG.getConstant(PairTy.getSizeInBits()/2, 3210 TLI.getShiftAmountTy(PairTy))); 3211 Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2)); 3212 break; 3213 } 3214 case ISD::SELECT: 3215 Tmp1 = Node->getOperand(0); 3216 Tmp2 = Node->getOperand(1); 3217 Tmp3 = Node->getOperand(2); 3218 if (Tmp1.getOpcode() == ISD::SETCC) { 3219 Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1), 3220 Tmp2, Tmp3, 3221 cast<CondCodeSDNode>(Tmp1.getOperand(2))->get()); 3222 } else { 3223 Tmp1 = DAG.getSelectCC(dl, Tmp1, 3224 DAG.getConstant(0, Tmp1.getValueType()), 3225 Tmp2, Tmp3, ISD::SETNE); 3226 } 3227 Results.push_back(Tmp1); 3228 break; 3229 case ISD::BR_JT: { 3230 SDValue Chain = Node->getOperand(0); 3231 SDValue Table = Node->getOperand(1); 3232 SDValue Index = Node->getOperand(2); 3233 3234 EVT PTy = TLI.getPointerTy(); 3235 3236 const TargetData &TD = *TLI.getTargetData(); 3237 unsigned EntrySize = 3238 DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD); 3239 3240 Index = DAG.getNode(ISD::MUL, dl, PTy, 3241 Index, DAG.getConstant(EntrySize, PTy)); 3242 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 3243 3244 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8); 3245 SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr, 3246 MachinePointerInfo::getJumpTable(), MemVT, 3247 false, false, 0); 3248 Addr = LD; 3249 if (TM.getRelocationModel() == Reloc::PIC_) { 3250 // For PIC, the sequence is: 3251 // BRIND(load(Jumptable + index) + RelocBase) 3252 // RelocBase can be JumpTable, GOT or some sort of global base. 3253 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, 3254 TLI.getPICJumpTableRelocBase(Table, DAG)); 3255 } 3256 Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr); 3257 Results.push_back(Tmp1); 3258 break; 3259 } 3260 case ISD::BRCOND: 3261 // Expand brcond's setcc into its constituent parts and create a BR_CC 3262 // Node. 3263 Tmp1 = Node->getOperand(0); 3264 Tmp2 = Node->getOperand(1); 3265 if (Tmp2.getOpcode() == ISD::SETCC) { 3266 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, 3267 Tmp1, Tmp2.getOperand(2), 3268 Tmp2.getOperand(0), Tmp2.getOperand(1), 3269 Node->getOperand(2)); 3270 } else { 3271 // We test only the i1 bit. Skip the AND if UNDEF. 3272 Tmp3 = (Tmp2.getOpcode() == ISD::UNDEF) ? 
Tmp2 : 3273 DAG.getNode(ISD::AND, dl, Tmp2.getValueType(), Tmp2, 3274 DAG.getConstant(1, Tmp2.getValueType())); 3275 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1, 3276 DAG.getCondCode(ISD::SETNE), Tmp3, 3277 DAG.getConstant(0, Tmp3.getValueType()), 3278 Node->getOperand(2)); 3279 } 3280 Results.push_back(Tmp1); 3281 break; 3282 case ISD::SETCC: { 3283 Tmp1 = Node->getOperand(0); 3284 Tmp2 = Node->getOperand(1); 3285 Tmp3 = Node->getOperand(2); 3286 LegalizeSetCCCondCode(Node->getValueType(0), Tmp1, Tmp2, Tmp3, dl); 3287 3288 // If we expanded the SETCC into an AND/OR, return the new node 3289 if (Tmp2.getNode() == 0) { 3290 Results.push_back(Tmp1); 3291 break; 3292 } 3293 3294 // Otherwise, SETCC for the given comparison type must be completely 3295 // illegal; expand it into a SELECT_CC. 3296 EVT VT = Node->getValueType(0); 3297 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2, 3298 DAG.getConstant(1, VT), DAG.getConstant(0, VT), Tmp3); 3299 Results.push_back(Tmp1); 3300 break; 3301 } 3302 case ISD::SELECT_CC: { 3303 Tmp1 = Node->getOperand(0); // LHS 3304 Tmp2 = Node->getOperand(1); // RHS 3305 Tmp3 = Node->getOperand(2); // True 3306 Tmp4 = Node->getOperand(3); // False 3307 SDValue CC = Node->getOperand(4); 3308 3309 LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp1.getValueType()), 3310 Tmp1, Tmp2, CC, dl); 3311 3312 assert(!Tmp2.getNode() && "Can't legalize SELECT_CC with legal condition!"); 3313 Tmp2 = DAG.getConstant(0, Tmp1.getValueType()); 3314 CC = DAG.getCondCode(ISD::SETNE); 3315 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, Tmp2, 3316 Tmp3, Tmp4, CC); 3317 Results.push_back(Tmp1); 3318 break; 3319 } 3320 case ISD::BR_CC: { 3321 Tmp1 = Node->getOperand(0); // Chain 3322 Tmp2 = Node->getOperand(2); // LHS 3323 Tmp3 = Node->getOperand(3); // RHS 3324 Tmp4 = Node->getOperand(1); // CC 3325 3326 LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp2.getValueType()), 3327 Tmp2, Tmp3, Tmp4, dl); 3328 3329 assert(!Tmp3.getNode() && "Can't legalize BR_CC with legal condition!"); 3330 Tmp3 = DAG.getConstant(0, Tmp2.getValueType()); 3331 Tmp4 = DAG.getCondCode(ISD::SETNE); 3332 Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4, Tmp2, 3333 Tmp3, Node->getOperand(4)); 3334 Results.push_back(Tmp1); 3335 break; 3336 } 3337 case ISD::BUILD_VECTOR: 3338 Results.push_back(ExpandBUILD_VECTOR(Node)); 3339 break; 3340 case ISD::SRA: 3341 case ISD::SRL: 3342 case ISD::SHL: { 3343 // Scalarize vector SRA/SRL/SHL. 
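    // (For example, a <4 x i32> SHL whose scalar i32 shift is legal becomes
    //  four pairs of EXTRACT_VECTOR_ELTs feeding scalar SHLs, recombined with
    //  a BUILD_VECTOR below.)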
3344 EVT VT = Node->getValueType(0); 3345 assert(VT.isVector() && "Unable to legalize non-vector shift"); 3346 assert(TLI.isTypeLegal(VT.getScalarType())&& "Element type must be legal"); 3347 unsigned NumElem = VT.getVectorNumElements(); 3348 3349 SmallVector<SDValue, 8> Scalars; 3350 for (unsigned Idx = 0; Idx < NumElem; Idx++) { 3351 SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 3352 VT.getScalarType(), 3353 Node->getOperand(0), DAG.getIntPtrConstant(Idx)); 3354 SDValue Sh = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, 3355 VT.getScalarType(), 3356 Node->getOperand(1), DAG.getIntPtrConstant(Idx)); 3357 Scalars.push_back(DAG.getNode(Node->getOpcode(), dl, 3358 VT.getScalarType(), Ex, Sh)); 3359 } 3360 SDValue Result = 3361 DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), 3362 &Scalars[0], Scalars.size()); 3363 DAG.ReplaceAllUsesWith(SDValue(Node, 0), Result, this); 3364 break; 3365 } 3366 case ISD::GLOBAL_OFFSET_TABLE: 3367 case ISD::GlobalAddress: 3368 case ISD::GlobalTLSAddress: 3369 case ISD::ExternalSymbol: 3370 case ISD::ConstantPool: 3371 case ISD::JumpTable: 3372 case ISD::INTRINSIC_W_CHAIN: 3373 case ISD::INTRINSIC_WO_CHAIN: 3374 case ISD::INTRINSIC_VOID: 3375 // FIXME: Custom lowering for these operations shouldn't return null! 3376 break; 3377 } 3378 3379 // Replace the original node with the legalized result. 3380 if (!Results.empty()) 3381 DAG.ReplaceAllUsesWith(Node, Results.data(), this); 3382} 3383 3384void SelectionDAGLegalize::PromoteNode(SDNode *Node) { 3385 SmallVector<SDValue, 8> Results; 3386 EVT OVT = Node->getValueType(0); 3387 if (Node->getOpcode() == ISD::UINT_TO_FP || 3388 Node->getOpcode() == ISD::SINT_TO_FP || 3389 Node->getOpcode() == ISD::SETCC) { 3390 OVT = Node->getOperand(0).getValueType(); 3391 } 3392 EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT); 3393 DebugLoc dl = Node->getDebugLoc(); 3394 SDValue Tmp1, Tmp2, Tmp3; 3395 switch (Node->getOpcode()) { 3396 case ISD::CTTZ: 3397 case ISD::CTLZ: 3398 case ISD::CTPOP: 3399 // Zero extend the argument. 3400 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0)); 3401 // Perform the larger operation. 
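    // (For example, promoting an i8 CTLZ to i32: ctlz of the zero-extended
    //  value counts 24 extra leading zero bits, which the CTLZ correction
    //  below subtracts again; for CTTZ a result of 32 is mapped back to 8.)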
3402 Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1); 3403 if (Node->getOpcode() == ISD::CTTZ) { 3404 //if Tmp1 == sizeinbits(NVT) then Tmp1 = sizeinbits(Old VT) 3405 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT), 3406 Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT), 3407 ISD::SETEQ); 3408 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, 3409 DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1); 3410 } else if (Node->getOpcode() == ISD::CTLZ) { 3411 // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT)) 3412 Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1, 3413 DAG.getConstant(NVT.getSizeInBits() - 3414 OVT.getSizeInBits(), NVT)); 3415 } 3416 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1)); 3417 break; 3418 case ISD::BSWAP: { 3419 unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits(); 3420 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0)); 3421 Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1); 3422 Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1, 3423 DAG.getConstant(DiffBits, TLI.getShiftAmountTy(NVT))); 3424 Results.push_back(Tmp1); 3425 break; 3426 } 3427 case ISD::FP_TO_UINT: 3428 case ISD::FP_TO_SINT: 3429 Tmp1 = PromoteLegalFP_TO_INT(Node->getOperand(0), Node->getValueType(0), 3430 Node->getOpcode() == ISD::FP_TO_SINT, dl); 3431 Results.push_back(Tmp1); 3432 break; 3433 case ISD::UINT_TO_FP: 3434 case ISD::SINT_TO_FP: 3435 Tmp1 = PromoteLegalINT_TO_FP(Node->getOperand(0), Node->getValueType(0), 3436 Node->getOpcode() == ISD::SINT_TO_FP, dl); 3437 Results.push_back(Tmp1); 3438 break; 3439 case ISD::AND: 3440 case ISD::OR: 3441 case ISD::XOR: { 3442 unsigned ExtOp, TruncOp; 3443 if (OVT.isVector()) { 3444 ExtOp = ISD::BITCAST; 3445 TruncOp = ISD::BITCAST; 3446 } else { 3447 assert(OVT.isInteger() && "Cannot promote logic operation"); 3448 ExtOp = ISD::ANY_EXTEND; 3449 TruncOp = ISD::TRUNCATE; 3450 } 3451 // Promote each of the values to the new type. 3452 Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0)); 3453 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1)); 3454 // Perform the larger operation, then convert back 3455 Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2); 3456 Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1)); 3457 break; 3458 } 3459 case ISD::SELECT: { 3460 unsigned ExtOp, TruncOp; 3461 if (Node->getValueType(0).isVector()) { 3462 ExtOp = ISD::BITCAST; 3463 TruncOp = ISD::BITCAST; 3464 } else if (Node->getValueType(0).isInteger()) { 3465 ExtOp = ISD::ANY_EXTEND; 3466 TruncOp = ISD::TRUNCATE; 3467 } else { 3468 ExtOp = ISD::FP_EXTEND; 3469 TruncOp = ISD::FP_ROUND; 3470 } 3471 Tmp1 = Node->getOperand(0); 3472 // Promote each of the values to the new type. 3473 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1)); 3474 Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2)); 3475 // Perform the larger operation, then round down. 3476 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp1, Tmp2, Tmp3); 3477 if (TruncOp != ISD::FP_ROUND) 3478 Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1); 3479 else 3480 Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1, 3481 DAG.getIntPtrConstant(0)); 3482 Results.push_back(Tmp1); 3483 break; 3484 } 3485 case ISD::VECTOR_SHUFFLE: { 3486 SmallVector<int, 8> Mask; 3487 cast<ShuffleVectorSDNode>(Node)->getMask(Mask); 3488 3489 // Cast the two input vectors. 3490 Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0)); 3491 Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1)); 3492 3493 // Convert the shuffle mask to the right # elements. 
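    // (The shuffle itself is performed on the promoted type NVT; the result
    //  is bitcast back to OVT below, so users of this node are unaffected.)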
    Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SETCC: {
    unsigned ExtOp = ISD::FP_EXTEND;
    if (NVT.isInteger()) {
      ISD::CondCode CCCode =
        cast<CondCodeSDNode>(Node->getOperand(2))->get();
      ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    }
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
                                  Tmp1, Tmp2, Node->getOperand(2)));
    break;
  }
  }

  // Replace the original node with the legalized result.
  if (!Results.empty())
    DAG.ReplaceAllUsesWith(Node, Results.data(), this);
}

// SelectionDAG::Legalize - This is the entry point for the file.
//
void SelectionDAG::Legalize() {
  SelectionDAGLegalize(*this).LegalizeDAG();
}