LegalizeDAG.cpp revision 1809d5fa216bbdc505502468b7bd85629e1e44bc
//===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::Legalize method.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;

//===----------------------------------------------------------------------===//
/// SelectionDAGLegalize - This takes an arbitrary SelectionDAG as input and
/// hacks on it until the target machine can handle it. This involves
/// eliminating value sizes the machine cannot handle (promoting small sizes to
/// large sizes or splitting up large values into small values) as well as
/// eliminating operations the machine cannot handle.
///
/// This code also does a small amount of optimization and recognition of idioms
/// as part of its processing. For example, if a target does not support a
/// 'setcc' instruction efficiently, but does support the 'brcc' instruction,
/// this will attempt to merge setcc and brc instructions into brcc's.
///
namespace {
class SelectionDAGLegalize {
  const TargetMachine &TM;
  const TargetLowering &TLI;
  SelectionDAG &DAG;
  CodeGenOpt::Level OptLevel;

  // Libcall insertion helpers.

  /// LastCALLSEQ_END - This keeps track of the CALLSEQ_END node that has been
  /// legalized. We use this to ensure that calls are properly serialized
  /// against each other, including inserted libcalls.
  SDValue LastCALLSEQ_END;

  /// IsLegalizingCall - This member is used *only* for purposes of providing
  /// helpful assertions that a libcall isn't created while another call is
  /// being legalized (which could lead to non-serialized call sequences).
  bool IsLegalizingCall;

  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand      // Try to expand this to other ops, otherwise use a libcall.
  };

  /// ValueTypeActions - This is a bitvector that contains two bits for each
  /// value type, where the two bits correspond to the LegalizeAction enum.
  /// This can be queried with "getTypeAction(VT)".
  TargetLowering::ValueTypeActionImpl ValueTypeActions;

  /// LegalizedNodes - For nodes that are of legal width, and that have more
  /// than one use, this map indicates what regularized operand to use. This
  /// allows us to avoid legalizing the same thing more than once.
  DenseMap<SDValue, SDValue> LegalizedNodes;

  void AddLegalizedOperand(SDValue From, SDValue To) {
    LegalizedNodes.insert(std::make_pair(From, To));
    // If someone requests legalization of the new node, return itself.
    if (From != To)
      LegalizedNodes.insert(std::make_pair(To, To));

    // Transfer SDDbgValues.
    DAG.TransferDbgValues(From, To);
  }

public:
  SelectionDAGLegalize(SelectionDAG &DAG, CodeGenOpt::Level ol);

  /// getTypeAction - Return how we should legalize values of this type, either
  /// it is already legal or we need to expand it into multiple registers of
  /// smaller integer type, or we need to promote it to a larger type.
  LegalizeAction getTypeAction(EVT VT) const {
    return (LegalizeAction)ValueTypeActions.getTypeAction(VT);
  }

  /// isTypeLegal - Return true if this type is legal on this target.
  ///
  bool isTypeLegal(EVT VT) const {
    return getTypeAction(VT) == Legal;
  }

  void LegalizeDAG();

private:
  /// LegalizeOp - We know that the specified value has a legal type.
  /// Recursively ensure that the operands have legal types, then return the
  /// result.
  SDValue LegalizeOp(SDValue O);

  SDValue OptimizeFloatStore(StoreSDNode *ST);

  /// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
  /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
  /// is necessary to spill the vector being inserted into to memory, perform
  /// the insert there, and then read the result back.
  SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val,
                                         SDValue Idx, DebugLoc dl);
  SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
                                  SDValue Idx, DebugLoc dl);

  /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
  /// performs the same shuffle in terms of order of result bytes, but on a
  /// type whose vector element type is narrower than the original shuffle
  /// type. e.g.
  ///   <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
  SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
                                     SDValue N1, SDValue N2,
                                     SmallVectorImpl<int> &Mask) const;

  bool LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
                                    SmallPtrSet<SDNode*, 32> &NodesLeadingTo);

  void LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
                             DebugLoc dl);

  SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
  std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
                                                 SDNode *Node, bool isSigned);
  SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
                          RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
                          RTLIB::Libcall Call_PPCF128);
  SDValue ExpandIntLibCall(SDNode *Node, bool isSigned,
                           RTLIB::Libcall Call_I8,
                           RTLIB::Libcall Call_I16,
                           RTLIB::Libcall Call_I32,
                           RTLIB::Libcall Call_I64,
                           RTLIB::Libcall Call_I128);
  SDValue ExpandDivRemLibCall(SDNode *Node, bool isSigned, bool isDIV);

  SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, DebugLoc dl);
  SDValue ExpandBUILD_VECTOR(SDNode *Node);
  SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
  void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
                                SmallVectorImpl<SDValue> &Results);
  SDValue ExpandFCOPYSIGN(SDNode *Node);
  SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT,
                               DebugLoc dl);
  SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);
  SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);

  SDValue ExpandBSWAP(SDValue Op, DebugLoc dl);
  SDValue ExpandBitCount(unsigned Opc, SDValue Op, DebugLoc dl);

  SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
  SDValue ExpandInsertToVectorThroughStack(SDValue Op);
  SDValue ExpandVectorBuildThroughStack(SDNode* Node);

  std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);

  void ExpandNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void PromoteNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
};
}

/// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
/// performs the same shuffle in terms of order of result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g.
///   <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
SDValue
SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
                                                 SDValue N1, SDValue N2,
                                             SmallVectorImpl<int> &Mask) const {
  unsigned NumMaskElts = VT.getVectorNumElements();
  unsigned NumDestElts = NVT.getVectorNumElements();
  unsigned NumEltsGrowth = NumDestElts / NumMaskElts;

  assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!");

  if (NumEltsGrowth == 1)
    return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]);

  SmallVector<int, 8> NewMask;
  for (unsigned i = 0; i != NumMaskElts; ++i) {
    int Idx = Mask[i];
    for (unsigned j = 0; j != NumEltsGrowth; ++j) {
      if (Idx < 0)
        NewMask.push_back(-1);
      else
        NewMask.push_back(Idx * NumEltsGrowth + j);
    }
  }
  assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?");
  assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?");
  return DAG.getVectorShuffle(NVT, dl, N1, N2, &NewMask[0]);
}

SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag,
                                           CodeGenOpt::Level ol)
  : TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()),
    DAG(dag), OptLevel(ol),
    ValueTypeActions(TLI.getValueTypeActions()) {
  assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE &&
         "Too many value types for ValueTypeActions to hold!");
}

void SelectionDAGLegalize::LegalizeDAG() {
  LastCALLSEQ_END = DAG.getEntryNode();
  IsLegalizingCall = false;

  // The legalize process is inherently a bottom-up recursive process (users
  // legalize their uses before themselves). Given infinite stack space, we
  // could just start legalizing on the root and traverse the whole graph. In
  // practice however, this causes us to run out of stack space on large basic
  // blocks. To avoid this problem, compute an ordering of the nodes where each
  // node is only legalized after all of its operands are legalized.
  DAG.AssignTopologicalOrder();
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = prior(DAG.allnodes_end()); I != llvm::next(E); ++I)
    LegalizeOp(SDValue(I, 0));

  // Finally, it's possible the root changed. Get the new root.
  SDValue OldRoot = DAG.getRoot();
  assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?");
  DAG.setRoot(LegalizedNodes[OldRoot]);

  LegalizedNodes.clear();

  // Remove dead nodes now.
  DAG.RemoveDeadNodes();
}


/// FindCallEndFromCallStart - Given a chained node that is part of a call
/// sequence, find the CALLSEQ_END node that terminates the call sequence.
static SDNode *FindCallEndFromCallStart(SDNode *Node, int depth = 0) {
  // Nested CALLSEQ_START/END constructs aren't yet legal,
  // but we can DTRT and handle them correctly here.
  if (Node->getOpcode() == ISD::CALLSEQ_START)
    depth++;
  else if (Node->getOpcode() == ISD::CALLSEQ_END) {
    depth--;
    if (depth == 0)
      return Node;
  }
  if (Node->use_empty())
    return 0;   // No CallSeqEnd

  // The chain is usually at the end.
  SDValue TheChain(Node, Node->getNumValues()-1);
  if (TheChain.getValueType() != MVT::Other) {
    // Sometimes it's at the beginning.
    TheChain = SDValue(Node, 0);
    if (TheChain.getValueType() != MVT::Other) {
      // Otherwise, hunt for it.
      for (unsigned i = 1, e = Node->getNumValues(); i != e; ++i)
        if (Node->getValueType(i) == MVT::Other) {
          TheChain = SDValue(Node, i);
          break;
        }

      // Otherwise, we walked into a node without a chain.
      if (TheChain.getValueType() != MVT::Other)
        return 0;
    }
  }

  for (SDNode::use_iterator UI = Node->use_begin(),
       E = Node->use_end(); UI != E; ++UI) {

    // Make sure to only follow users of our token chain.
    SDNode *User = *UI;
    for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i)
      if (User->getOperand(i) == TheChain)
        if (SDNode *Result = FindCallEndFromCallStart(User, depth))
          return Result;
  }
  return 0;
}

/// FindCallStartFromCallEnd - Given a chained node that is part of a call
/// sequence, find the CALLSEQ_START node that initiates the call sequence.
static SDNode *FindCallStartFromCallEnd(SDNode *Node) {
  int nested = 0;
  assert(Node && "Didn't find callseq_start for a call??");
  while (Node->getOpcode() != ISD::CALLSEQ_START || nested) {
    Node = Node->getOperand(0).getNode();
    assert(Node->getOperand(0).getValueType() == MVT::Other &&
           "Node doesn't have a token chain argument!");
    switch (Node->getOpcode()) {
    default:
      break;
    case ISD::CALLSEQ_START:
      if (!nested)
        return Node;
      nested--;
      break;
    case ISD::CALLSEQ_END:
      nested++;
      break;
    }
  }
  return 0;
}

/// LegalizeAllNodesNotLeadingTo - Recursively walk the uses of N, looking to
/// see if any uses can reach Dest. If no dest operands can get to dest,
/// legalize them, legalize ourself, and return false, otherwise, return true.
///
/// Keep track of the nodes we find that actually do lead to Dest in
/// NodesLeadingTo. This avoids retraversing them an exponential number of
/// times.
///
bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
                                     SmallPtrSet<SDNode*, 32> &NodesLeadingTo) {
  if (N == Dest) return true;  // N certainly leads to Dest :)

  // If we've already processed this node and it does lead to Dest, there is no
  // need to reprocess it.
  if (NodesLeadingTo.count(N)) return true;

  // If the first result of this node has already been legalized, then it
  // cannot reach N.
  if (LegalizedNodes.count(SDValue(N, 0))) return false;

  // Okay, this node has not already been legalized. Check and legalize all
  // operands. If none lead to Dest, then we can legalize this node.
  bool OperandsLeadToDest = false;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    OperandsLeadToDest |=     // If an operand leads to Dest, so do we.
      LegalizeAllNodesNotLeadingTo(N->getOperand(i).getNode(), Dest,
                                   NodesLeadingTo);

  if (OperandsLeadToDest) {
    NodesLeadingTo.insert(N);
    return true;
  }

  // Okay, this node looks safe, legalize it and return false.
  LegalizeOp(SDValue(N, 0));
  return false;
}

/// ExpandConstantFP - Expands the ConstantFP node to an integer constant or
/// a load from the constant pool.
static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
                                SelectionDAG &DAG, const TargetLowering &TLI) {
  bool Extend = false;
  DebugLoc dl = CFP->getDebugLoc();

  // If a FP immediate is precise when represented as a float and if the
  // target can do an extending load from float to double, we put it into
  // the constant pool as a float, even if it is statically typed as a
  // double.
  // This shrinks FP constants and canonicalizes them for targets where
  // an FP extending load is the same cost as a normal load (such as on the x87
  // fp stack or PPC FP unit).
  EVT VT = CFP->getValueType(0);
  ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue());
  if (!UseCP) {
    assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion");
    return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(),
                           (VT == MVT::f64) ? MVT::i64 : MVT::i32);
  }

  EVT OrigVT = VT;
  EVT SVT = VT;
  while (SVT != MVT::f32) {
    SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
    if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) &&
        // Only do this if the target has a native EXTLOAD instruction from
        // smaller type.
        TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
        TLI.ShouldShrinkFPConstant(OrigVT)) {
      const Type *SType = SVT.getTypeForEVT(*DAG.getContext());
      LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
      VT = SVT;
      Extend = true;
    }
  }

  SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy());
  unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
  if (Extend)
    return DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT,
                          DAG.getEntryNode(),
                          CPIdx, MachinePointerInfo::getConstantPool(),
                          VT, false, false, Alignment);
  return DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx,
                     MachinePointerInfo::getConstantPool(), false, false,
                     Alignment);
}

/// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores.
static
SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
                             const TargetLowering &TLI) {
  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  SDValue Val = ST->getValue();
  EVT VT = Val.getValueType();
  int Alignment = ST->getAlignment();
  DebugLoc dl = ST->getDebugLoc();
  if (ST->getMemoryVT().isFloatingPoint() ||
      ST->getMemoryVT().isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (TLI.isTypeLegal(intVT)) {
      // Expand to a bitconvert of the value to the integer type of the
      // same size, then a (misaligned) int store.
      // FIXME: Does not handle truncating floating point stores!
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
      return DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
                          ST->isVolatile(), ST->isNonTemporal(), Alignment);
    } else {
      // Do an (aligned) store to a stack slot, then copy from the stack slot
      // to the final destination using (unaligned) integer loads and stores.
      EVT StoredVT = ST->getMemoryVT();
      EVT RegVT =
        TLI.getRegisterType(*DAG.getContext(),
                            EVT::getIntegerVT(*DAG.getContext(),
                                              StoredVT.getSizeInBits()));
      unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
      unsigned RegBytes = RegVT.getSizeInBits() / 8;
      unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;

      // Make sure the stack slot is also aligned for the register type.
      SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);

      // Perform the original store, only redirected to the stack slot.
      SDValue Store = DAG.getTruncStore(Chain, dl,
                                        Val, StackPtr, MachinePointerInfo(),
                                        StoredVT, false, false, 0);
      SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
      SmallVector<SDValue, 8> Stores;
      unsigned Offset = 0;

      // Do all but one copy using the full register width.
      for (unsigned i = 1; i < NumRegs; i++) {
        // Load one integer register's worth from the stack slot.
        SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr,
                                   MachinePointerInfo(),
                                   false, false, 0);
        // Store it to the final location. Remember the store.
        Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
                                    ST->getPointerInfo().getWithOffset(Offset),
                                      ST->isVolatile(), ST->isNonTemporal(),
                                      MinAlign(ST->getAlignment(), Offset)));
        // Increment the pointers.
        Offset += RegBytes;
        StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                               Increment);
        Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
      }

      // The last store may be partial. Do a truncating store. On big-endian
      // machines this requires an extending load from the stack slot to ensure
      // that the bits are in the right place.
      EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                    8 * (StoredBytes - Offset));

      // Load from the stack slot.
      SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
                                    MachinePointerInfo(),
                                    MemVT, false, false, 0);

      Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
                                         ST->getPointerInfo()
                                           .getWithOffset(Offset),
                                         MemVT, ST->isVolatile(),
                                         ST->isNonTemporal(),
                                         MinAlign(ST->getAlignment(), Offset)));
      // The order of the stores doesn't matter - say it with a TokenFactor.
      return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
                         Stores.size());
    }
  }
  assert(ST->getMemoryVT().isInteger() &&
         !ST->getMemoryVT().isVector() &&
         "Unaligned store of unknown type.");
  // Get the half-size VT
  EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext());
  int NumBits = NewStoredVT.getSizeInBits();
  int IncrementSize = NumBits / 8;

  // Divide the stored value in two parts.
  SDValue ShiftAmount = DAG.getConstant(NumBits,
                                      TLI.getShiftAmountTy(Val.getValueType()));
  SDValue Lo = Val;
  SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);

  // Store the two parts
  SDValue Store1, Store2;
  Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr,
                             ST->getPointerInfo(), NewStoredVT,
                             ST->isVolatile(), ST->isNonTemporal(), Alignment);
  Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                    DAG.getConstant(IncrementSize, TLI.getPointerTy()));
  Alignment = MinAlign(Alignment, IncrementSize);
  Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr,
                             ST->getPointerInfo().getWithOffset(IncrementSize),
                             NewStoredVT, ST->isVolatile(), ST->isNonTemporal(),
                             Alignment);

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
}

/// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads.
static
SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
                            const TargetLowering &TLI) {
  SDValue Chain = LD->getChain();
  SDValue Ptr = LD->getBasePtr();
  EVT VT = LD->getValueType(0);
  EVT LoadedVT = LD->getMemoryVT();
  DebugLoc dl = LD->getDebugLoc();
  if (VT.isFloatingPoint() || VT.isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
    if (TLI.isTypeLegal(intVT)) {
      // Expand to a (misaligned) integer load of the same size,
      // then bitconvert to floating point or vector.
      SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(),
                                    LD->isVolatile(),
                                    LD->isNonTemporal(), LD->getAlignment());
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
      if (VT.isFloatingPoint() && LoadedVT != VT)
        Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result);

      SDValue Ops[] = { Result, Chain };
      return DAG.getMergeValues(Ops, 2, dl);
    }

    // Copy the value to an (aligned) stack slot using (unaligned) integer
    // loads and stores, then do an (aligned) load from the stack slot.
    EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT);
    unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);

    SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
    SmallVector<SDValue, 8> Stores;
    SDValue StackPtr = StackBase;
    unsigned Offset = 0;

    // Do all but one copy using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the original location.
      SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr,
                                 LD->getPointerInfo().getWithOffset(Offset),
                                 LD->isVolatile(), LD->isNonTemporal(),
                                 MinAlign(LD->getAlignment(), Offset));
      // Follow the load with a store to the stack slot. Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
                                    MachinePointerInfo(), false, false, 0));
      // Increment the pointers.
      Offset += RegBytes;
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                             Increment);
    }

    // The last copy may be partial. Do an extending load.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (LoadedBytes - Offset));
    SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
                                  LD->getPointerInfo().getWithOffset(Offset),
                                  MemVT, LD->isVolatile(),
                                  LD->isNonTemporal(),
                                  MinAlign(LD->getAlignment(), Offset));
    // Follow the load with a store to the stack slot. Remember the store.
    // On big-endian machines this requires a truncating store to ensure
    // that the bits end up in the right place.
    Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
                                       MachinePointerInfo(), MemVT,
                                       false, false, 0));

    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
                             Stores.size());

    // Finally, perform the original load only redirected to the stack slot.
    Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
                          MachinePointerInfo(), LoadedVT, false, false, 0);

    // Callers expect a MERGE_VALUES node.
    SDValue Ops[] = { Load, TF };
    return DAG.getMergeValues(Ops, 2, dl);
  }
  assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
         "Unaligned load of unsupported type.");

  // Compute the new VT that is half the size of the old one. This is an
  // integer MVT.
  unsigned NumBits = LoadedVT.getSizeInBits();
  EVT NewLoadedVT;
  NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
  NumBits >>= 1;

  unsigned Alignment = LD->getAlignment();
  unsigned IncrementSize = NumBits / 8;
  ISD::LoadExtType HiExtType = LD->getExtensionType();

  // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
  if (HiExtType == ISD::NON_EXTLOAD)
    HiExtType = ISD::ZEXTLOAD;

  // Load the value in two parts
  SDValue Lo, Hi;
  if (TLI.isLittleEndian()) {
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, TLI.getPointerTy()));
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
  } else {
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, TLI.getPointerTy()));
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
  }

  // Aggregate the two parts.
  SDValue ShiftAmount = DAG.getConstant(NumBits,
                                       TLI.getShiftAmountTy(Hi.getValueType()));
  SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
  Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                           Hi.getValue(1));

  SDValue Ops[] = { Result, TF };
  return DAG.getMergeValues(Ops, 2, dl);
}

/// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
/// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
/// is necessary to spill the vector being inserted into to memory, perform
/// the insert there, and then read the result back.
SDValue SelectionDAGLegalize::
PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
                               DebugLoc dl) {
  SDValue Tmp1 = Vec;
  SDValue Tmp2 = Val;
  SDValue Tmp3 = Idx;

  // If the target doesn't support this, we have to spill the input vector
  // to a temporary stack slot, update the element, then reload it. This is
  // badness. We could also load the value into a vector register (either
  // with a "move to register" or "extload into register" instruction, then
  // permute it into place, if the idx is a constant and if the idx is
  // supported by the target.
  EVT VT    = Tmp1.getValueType();
  EVT EltVT = VT.getVectorElementType();
  EVT IdxVT = Tmp3.getValueType();
  EVT PtrVT = TLI.getPointerTy();
  SDValue StackPtr = DAG.CreateStackTemporary(VT);

  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();

  // Store the vector.
  SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr,
                            MachinePointerInfo::getFixedStack(SPFI),
                            false, false, 0);

  // Truncate or zero extend offset to target pointer type.
  unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  Tmp3 = DAG.getNode(CastOpc, dl, PtrVT, Tmp3);
  // Add the offset to the index.
  unsigned EltSize = EltVT.getSizeInBits()/8;
  Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT));
  SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr);
  // Store the scalar value.
  Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, MachinePointerInfo(), EltVT,
                         false, false, 0);
  // Load the updated vector.
  return DAG.getLoad(VT, dl, Ch, StackPtr,
                     MachinePointerInfo::getFixedStack(SPFI), false, false, 0);
}


SDValue SelectionDAGLegalize::
ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, DebugLoc dl) {
  if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) {
    // SCALAR_TO_VECTOR requires that the type of the value being inserted
    // match the element type of the vector being created, except for
    // integers in which case the inserted value can be over width.
    EVT EltVT = Vec.getValueType().getVectorElementType();
    if (Val.getValueType() == EltVT ||
        (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) {
      SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                  Vec.getValueType(), Val);

      unsigned NumElts = Vec.getValueType().getVectorNumElements();
      // We generate a shuffle of InVec and ScVec, so the shuffle mask
      // should be 0,1,2,3,4,5... with the appropriate element replaced with
      // elt 0 of the RHS.
      SmallVector<int, 8> ShufOps;
      for (unsigned i = 0; i != NumElts; ++i)
        ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts);

      return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec,
                                  &ShufOps[0]);
    }
  }
  return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl);
}

SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
  // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
  // FIXME: We shouldn't do this for TargetConstantFP's.
  // FIXME: move this to the DAG Combiner! Note that we can't regress due
  // to phase ordering between legalized code and the dag combiner. This
  // probably means that we need to integrate dag combiner and legalizer
  // together.
  // We generally can't do this one for long doubles.
  SDValue Tmp1 = ST->getChain();
  SDValue Tmp2 = ST->getBasePtr();
  SDValue Tmp3;
  unsigned Alignment = ST->getAlignment();
  bool isVolatile = ST->isVolatile();
  bool isNonTemporal = ST->isNonTemporal();
  DebugLoc dl = ST->getDebugLoc();
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) {
    if (CFP->getValueType(0) == MVT::f32 &&
        getTypeAction(MVT::i32) == Legal) {
      Tmp3 = DAG.getConstant(CFP->getValueAPF().
                             bitcastToAPInt().zextOrTrunc(32),
                             MVT::i32);
      return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                          isVolatile, isNonTemporal, Alignment);
    }

    if (CFP->getValueType(0) == MVT::f64) {
      // If this target supports 64-bit registers, do a single 64-bit store.
      if (getTypeAction(MVT::i64) == Legal) {
        Tmp3 = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
                               zextOrTrunc(64), MVT::i64);
        return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                            isVolatile, isNonTemporal, Alignment);
      }

      if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) {
        // Otherwise, if the target supports 32-bit registers, use 2 32-bit
        // stores. If the target supports neither 32- nor 64-bits, this
        // xform is certainly not worth it.
        const APInt &IntVal = CFP->getValueAPF().bitcastToAPInt();
        SDValue Lo = DAG.getConstant(IntVal.trunc(32), MVT::i32);
        SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32);
        if (TLI.isBigEndian()) std::swap(Lo, Hi);

        Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getPointerInfo(), isVolatile,
                          isNonTemporal, Alignment);
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(4));
        Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2,
                          ST->getPointerInfo().getWithOffset(4),
                          isVolatile, isNonTemporal, MinAlign(Alignment, 4U));

        return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
      }
    }
  }
  return SDValue(0, 0);
}

/// LegalizeOp - We know that the specified value has a legal type, and
/// that its operands are legal.  Now ensure that the operation itself
/// is legal, recursively ensuring that the operands' operations remain
/// legal.
SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
  if (Op.getOpcode() == ISD::TargetConstant) // Allow illegal target nodes.
    return Op;

  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();

  for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
    assert(getTypeAction(Node->getValueType(i)) == Legal &&
           "Unexpected illegal type!");

  for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
    assert((isTypeLegal(Node->getOperand(i).getValueType()) ||
            Node->getOperand(i).getOpcode() == ISD::TargetConstant) &&
           "Unexpected illegal type!");

  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
  if (I != LegalizedNodes.end()) return I->second;

  SDValue Tmp1, Tmp2, Tmp3, Tmp4;
  SDValue Result = Op;
  bool isCustom = false;

  // Figure out the correct action; the way to query this varies by opcode.
  TargetLowering::LegalizeAction Action = TargetLowering::Legal;
  bool SimpleFinishLegalizing = true;
  switch (Node->getOpcode()) {
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID:
  case ISD::VAARG:
  case ISD::STACKSAVE:
    Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
    break;
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::EXTRACT_VECTOR_ELT:
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(0).getValueType());
    break;
  case ISD::FP_ROUND_INREG:
  case ISD::SIGN_EXTEND_INREG: {
    EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT();
    Action = TLI.getOperationAction(Node->getOpcode(), InnerType);
    break;
  }
  case ISD::SELECT_CC:
  case ISD::SETCC:
  case ISD::BR_CC: {
    unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 :
                         Node->getOpcode() == ISD::SETCC ? 2 : 1;
    unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ?
                              2 : 0;
    EVT OpVT = Node->getOperand(CompareOperand).getValueType();
    ISD::CondCode CCCode =
      cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get();
    Action = TLI.getCondCodeAction(CCCode, OpVT);
    if (Action == TargetLowering::Legal) {
      if (Node->getOpcode() == ISD::SELECT_CC)
        Action = TLI.getOperationAction(Node->getOpcode(),
                                        Node->getValueType(0));
      else
        Action = TLI.getOperationAction(Node->getOpcode(), OpVT);
    }
    break;
  }
  case ISD::LOAD:
  case ISD::STORE:
    // FIXME: Model these properly. LOAD and STORE are complicated, and
    // STORE expects the unlegalized operand in some cases.
    SimpleFinishLegalizing = false;
    break;
  case ISD::CALLSEQ_START:
  case ISD::CALLSEQ_END:
    // FIXME: This shouldn't be necessary. These nodes have special properties
    // dealing with the recursive nature of legalization. Removing this
    // special case should be done as part of making LegalizeDAG non-recursive.
    SimpleFinishLegalizing = false;
    break;
  case ISD::EXTRACT_ELEMENT:
  case ISD::FLT_ROUNDS_:
  case ISD::SADDO:
  case ISD::SSUBO:
  case ISD::UADDO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
  case ISD::FPOWI:
  case ISD::MERGE_VALUES:
  case ISD::EH_RETURN:
  case ISD::FRAME_TO_ARGS_OFFSET:
  case ISD::EH_SJLJ_SETJMP:
  case ISD::EH_SJLJ_LONGJMP:
  case ISD::EH_SJLJ_DISPATCHSETUP:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be expanded.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Expand;
    break;
  case ISD::TRAMPOLINE:
  case ISD::FRAMEADDR:
  case ISD::RETURNADDR:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be custom-lowered.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Custom;
    break;
  case ISD::BUILD_VECTOR:
    // A weird case: legalization for BUILD_VECTOR never legalizes the
    // operands!
    // FIXME: This really sucks... changing it isn't semantically incorrect,
    // but it massively pessimizes the code for floating-point BUILD_VECTORs
    // because ConstantFP operands get legalized into constant pool loads
    // before the BUILD_VECTOR code can see them. It doesn't usually bite,
    // though, because BUILD_VECTORS usually get lowered into other nodes
    // which get legalized properly.
    SimpleFinishLegalizing = false;
    break;
  default:
    if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
      Action = TargetLowering::Legal;
    } else {
      Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    }
    break;
  }

  if (SimpleFinishLegalizing) {
    SmallVector<SDValue, 8> Ops, ResultVals;
    for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
      Ops.push_back(LegalizeOp(Node->getOperand(i)));
    switch (Node->getOpcode()) {
    default: break;
    case ISD::BR:
    case ISD::BRIND:
    case ISD::BR_JT:
    case ISD::BR_CC:
    case ISD::BRCOND:
      // Branches tweak the chain to include LastCALLSEQ_END
      Ops[0] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ops[0],
                           LastCALLSEQ_END);
      Ops[0] = LegalizeOp(Ops[0]);
      LastCALLSEQ_END = DAG.getEntryNode();
      break;
    case ISD::SHL:
    case ISD::SRL:
    case ISD::SRA:
    case ISD::ROTL:
    case ISD::ROTR:
      // Legalizing shifts/rotates requires adjusting the shift amount
      // to the appropriate width.
      if (!Ops[1].getValueType().isVector())
        Ops[1] = LegalizeOp(DAG.getShiftAmountOperand(Ops[0].getValueType(),
                                                      Ops[1]));
      break;
    case ISD::SRL_PARTS:
    case ISD::SRA_PARTS:
    case ISD::SHL_PARTS:
      // Legalizing shifts/rotates requires adjusting the shift amount
      // to the appropriate width.
      if (!Ops[2].getValueType().isVector())
        Ops[2] = LegalizeOp(DAG.getShiftAmountOperand(Ops[0].getValueType(),
                                                      Ops[2]));
      break;
    }

    Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), Ops.data(),
                                            Ops.size()), 0);
    switch (Action) {
    case TargetLowering::Legal:
      for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
        ResultVals.push_back(Result.getValue(i));
      break;
    case TargetLowering::Custom:
      // FIXME: The handling for custom lowering with multiple results is
      // a complete mess.
      Tmp1 = TLI.LowerOperation(Result, DAG);
      if (Tmp1.getNode()) {
        for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) {
          if (e == 1)
            ResultVals.push_back(Tmp1);
          else
            ResultVals.push_back(Tmp1.getValue(i));
        }
        break;
      }

      // FALL THROUGH
    case TargetLowering::Expand:
      ExpandNode(Result.getNode(), ResultVals);
      break;
    case TargetLowering::Promote:
      PromoteNode(Result.getNode(), ResultVals);
      break;
    }
    if (!ResultVals.empty()) {
      for (unsigned i = 0, e = ResultVals.size(); i != e; ++i) {
        if (ResultVals[i] != SDValue(Node, i))
          ResultVals[i] = LegalizeOp(ResultVals[i]);
        AddLegalizedOperand(SDValue(Node, i), ResultVals[i]);
      }
      return ResultVals[Op.getResNo()];
    }
  }

  switch (Node->getOpcode()) {
  default:
#ifndef NDEBUG
    dbgs() << "NODE: ";
    Node->dump( &DAG);
    dbgs() << "\n";
#endif
    assert(0 && "Do not know how to legalize this operator!");

  case ISD::BUILD_VECTOR:
    switch (TLI.getOperationAction(ISD::BUILD_VECTOR, Node->getValueType(0))) {
    default: assert(0 && "This action is not supported yet!");
    case TargetLowering::Custom:
      Tmp3 = TLI.LowerOperation(Result, DAG);
      if (Tmp3.getNode()) {
        Result = Tmp3;
        break;
      }
      // FALLTHROUGH
    case TargetLowering::Expand:
      Result = ExpandBUILD_VECTOR(Result.getNode());
      break;
    }
    break;
  case ISD::CALLSEQ_START: {
    SDNode *CallEnd = FindCallEndFromCallStart(Node);

    // Recursively legalize all of the inputs of the call end that do not lead
    // to this call start. This ensures that any libcalls that need to be
    // inserted are inserted *before* the CALLSEQ_START.
    {SmallPtrSet<SDNode*, 32> NodesLeadingTo;
    for (unsigned i = 0, e = CallEnd->getNumOperands(); i != e; ++i)
      LegalizeAllNodesNotLeadingTo(CallEnd->getOperand(i).getNode(), Node,
                                   NodesLeadingTo);
    }

    // Now that we have legalized all of the inputs (which may have inserted
    // libcalls), create the new CALLSEQ_START node.
    Tmp1 = LegalizeOp(Node->getOperand(0));  // Legalize the chain.

    // Merge in the last call to ensure that this call starts after the last
    // call ended.
    if (LastCALLSEQ_END.getOpcode() != ISD::EntryToken) {
      Tmp1 = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                         Tmp1, LastCALLSEQ_END);
      Tmp1 = LegalizeOp(Tmp1);
    }

    // Do not try to legalize the target-specific arguments (#1+).
    if (Tmp1 != Node->getOperand(0)) {
      SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
      Ops[0] = Tmp1;
      Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), &Ops[0],
                                              Ops.size()), Result.getResNo());
    }

    // Remember that the CALLSEQ_START is legalized.
    AddLegalizedOperand(Op.getValue(0), Result);
    if (Node->getNumValues() == 2)  // If this has a flag result, remember it.
      AddLegalizedOperand(Op.getValue(1), Result.getValue(1));

    // Now that the callseq_start and all of the non-call nodes above this call
    // sequence have been legalized, legalize the call itself. During this
    // process, no libcalls can/will be inserted, guaranteeing that no calls
    // can overlap.
    assert(!IsLegalizingCall && "Inconsistent sequentialization of calls!");
    // Note that we are selecting this call!
    LastCALLSEQ_END = SDValue(CallEnd, 0);
    IsLegalizingCall = true;

    // Legalize the call, starting from the CALLSEQ_END.
    LegalizeOp(LastCALLSEQ_END);
    assert(!IsLegalizingCall && "CALLSEQ_END should have cleared this!");
    return Result;
  }
  case ISD::CALLSEQ_END:
    // If the CALLSEQ_START node hasn't been legalized first, legalize it. This
    // will cause this node to be legalized as well as handling libcalls right.
    if (LastCALLSEQ_END.getNode() != Node) {
      LegalizeOp(SDValue(FindCallStartFromCallEnd(Node), 0));
      DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
      assert(I != LegalizedNodes.end() &&
             "Legalizing the call start should have legalized this node!");
      return I->second;
    }

    // Otherwise, the call start has been legalized and everything is going
    // according to plan. Just legalize ourselves normally here.
    Tmp1 = LegalizeOp(Node->getOperand(0));  // Legalize the chain.
    // Do not try to legalize the target-specific arguments (#1+), except for
    // an optional flag input.
    if (Node->getOperand(Node->getNumOperands()-1).getValueType() != MVT::Glue){
      if (Tmp1 != Node->getOperand(0)) {
        SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
        Ops[0] = Tmp1;
        Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
                                                &Ops[0], Ops.size()),
                         Result.getResNo());
      }
    } else {
      Tmp2 = LegalizeOp(Node->getOperand(Node->getNumOperands()-1));
      if (Tmp1 != Node->getOperand(0) ||
          Tmp2 != Node->getOperand(Node->getNumOperands()-1)) {
        SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
        Ops[0] = Tmp1;
        Ops.back() = Tmp2;
        Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
                                                &Ops[0], Ops.size()),
                         Result.getResNo());
      }
    }
    assert(IsLegalizingCall && "Call sequence imbalance between start/end?");
    // This finishes up call legalization.
    IsLegalizingCall = false;

    // If the CALLSEQ_END node has a flag, remember that we legalized it.
    AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0));
    if (Node->getNumValues() == 2)
      AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1));
    return Result.getValue(Op.getResNo());
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Node);
    Tmp1 = LegalizeOp(LD->getChain());    // Legalize the chain.
    Tmp2 = LegalizeOp(LD->getBasePtr());  // Legalize the base pointer.

    ISD::LoadExtType ExtType = LD->getExtensionType();
    if (ExtType == ISD::NON_EXTLOAD) {
      EVT VT = Node->getValueType(0);
      Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
                                              Tmp1, Tmp2, LD->getOffset()),
                       Result.getResNo());
      Tmp3 = Result.getValue(0);
      Tmp4 = Result.getValue(1);

      switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
      default: assert(0 && "This action is not supported yet!");
      case TargetLowering::Legal:
        // If this is an unaligned load and the target doesn't support it,
        // expand it.
        if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
          const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
          unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
          if (LD->getAlignment() < ABIAlignment){
            Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
                                         DAG, TLI);
            Tmp3 = Result.getOperand(0);
            Tmp4 = Result.getOperand(1);
            Tmp3 = LegalizeOp(Tmp3);
            Tmp4 = LegalizeOp(Tmp4);
          }
        }
        break;
      case TargetLowering::Custom:
        Tmp1 = TLI.LowerOperation(Tmp3, DAG);
        if (Tmp1.getNode()) {
          Tmp3 = LegalizeOp(Tmp1);
          Tmp4 = LegalizeOp(Tmp1.getValue(1));
        }
        break;
      case TargetLowering::Promote: {
        // Only promote a load of vector type to another.
        assert(VT.isVector() && "Cannot promote this load!");
        // Change base type to a different vector type.
        EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);

        Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getPointerInfo(),
                           LD->isVolatile(), LD->isNonTemporal(),
                           LD->getAlignment());
        Tmp3 = LegalizeOp(DAG.getNode(ISD::BITCAST, dl, VT, Tmp1));
        Tmp4 = LegalizeOp(Tmp1.getValue(1));
        break;
      }
      }
      // Since loads produce two values, make sure to remember that we
      // legalized both of them.
      AddLegalizedOperand(SDValue(Node, 0), Tmp3);
      AddLegalizedOperand(SDValue(Node, 1), Tmp4);
      return Op.getResNo() ? Tmp4 : Tmp3;
    }

    EVT SrcVT = LD->getMemoryVT();
    unsigned SrcWidth = SrcVT.getSizeInBits();
    unsigned Alignment = LD->getAlignment();
    bool isVolatile = LD->isVolatile();
    bool isNonTemporal = LD->isNonTemporal();

    if (SrcWidth != SrcVT.getStoreSizeInBits() &&
        // Some targets pretend to have an i1 loading operation, and actually
        // load an i8. This trick is correct for ZEXTLOAD because the top 7
        // bits are guaranteed to be zero; it helps the optimizers understand
        // that these bits are zero. It is also useful for EXTLOAD, since it
        // tells the optimizers that those bits are undefined. It would be
        // nice to have an effective generic way of getting these benefits...
        // Until such a way is found, don't insist on promoting i1 here.
        (SrcVT != MVT::i1 ||
         TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
      // Promote to a byte-sized load if not loading an integral number of
      // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
      unsigned NewWidth = SrcVT.getStoreSizeInBits();
      EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
      SDValue Ch;

      // The extra bits are guaranteed to be zero, since we stored them that
      // way. A zext load from NVT thus automatically gives zext from SrcVT.

      ISD::LoadExtType NewExtType =
        ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;

      Result = DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
                              Tmp1, Tmp2, LD->getPointerInfo(),
                              NVT, isVolatile, isNonTemporal, Alignment);

      Ch = Result.getValue(1); // The chain.

      if (ExtType == ISD::SEXTLOAD)
        // Having the top bits zero doesn't help when sign extending.
        Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
                             Result.getValueType(),
                             Result, DAG.getValueType(SrcVT));
      else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
        // All the top bits are guaranteed to be zero - inform the optimizers.
        Result = DAG.getNode(ISD::AssertZext, dl,
                             Result.getValueType(), Result,
                             DAG.getValueType(SrcVT));

      Tmp1 = LegalizeOp(Result);
      Tmp2 = LegalizeOp(Ch);
    } else if (SrcWidth & (SrcWidth - 1)) {
      // If not loading a power-of-2 number of bits, expand as two loads.
      assert(!SrcVT.isVector() && "Unsupported extload!");
      unsigned RoundWidth = 1 << Log2_32(SrcWidth);
      assert(RoundWidth < SrcWidth);
      unsigned ExtraWidth = SrcWidth - RoundWidth;
      assert(ExtraWidth < RoundWidth);
      assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
             "Load size not an integral number of bytes!");
      EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
      EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
      SDValue Lo, Hi, Ch;
      unsigned IncrementSize;

      if (TLI.isLittleEndian()) {
        // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
        // Load the bottom RoundWidth bits.
        Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0),
                            Tmp1, Tmp2,
                            LD->getPointerInfo(), RoundVT, isVolatile,
                            isNonTemporal, Alignment);

        // Load the remaining ExtraWidth bits.
        IncrementSize = RoundWidth / 8;
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(IncrementSize));
        Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getPointerInfo().getWithOffset(IncrementSize),
                            ExtraVT, isVolatile, isNonTemporal,
                            MinAlign(Alignment, IncrementSize));

        // Build a factor node to remember that this load is independent of
        // the other one.
        Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                         Hi.getValue(1));

        // Move the top bits to the right place.
        Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
                         DAG.getConstant(RoundWidth,
                                      TLI.getShiftAmountTy(Hi.getValueType())));

        // Join the hi and lo parts.
        Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
      } else {
        // Big endian - avoid unaligned loads.
        // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
        // Load the top RoundWidth bits.
        Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getPointerInfo(), RoundVT, isVolatile,
                            isNonTemporal, Alignment);

        // Load the remaining ExtraWidth bits.
        IncrementSize = RoundWidth / 8;
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(IncrementSize));
        Lo = DAG.getExtLoad(ISD::ZEXTLOAD,
                            dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getPointerInfo().getWithOffset(IncrementSize),
                            ExtraVT, isVolatile, isNonTemporal,
                            MinAlign(Alignment, IncrementSize));

        // Build a factor node to remember that this load is independent of
        // the other one.
        Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                         Hi.getValue(1));

        // Move the top bits to the right place.
        Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
                         DAG.getConstant(ExtraWidth,
                                      TLI.getShiftAmountTy(Hi.getValueType())));

        // Join the hi and lo parts.
        Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
      }

      Tmp1 = LegalizeOp(Result);
      Tmp2 = LegalizeOp(Ch);
    } else {
      switch (TLI.getLoadExtAction(ExtType, SrcVT)) {
      default: assert(0 && "This action is not supported yet!");
      case TargetLowering::Custom:
        isCustom = true;
        // FALLTHROUGH
      case TargetLowering::Legal:
        Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
                                                Tmp1, Tmp2, LD->getOffset()),
                         Result.getResNo());
        Tmp1 = Result.getValue(0);
        Tmp2 = Result.getValue(1);

        if (isCustom) {
          Tmp3 = TLI.LowerOperation(Result, DAG);
          if (Tmp3.getNode()) {
            Tmp1 = LegalizeOp(Tmp3);
            Tmp2 = LegalizeOp(Tmp3.getValue(1));
          }
        } else {
          // If this is an unaligned load and the target doesn't support it,
          // expand it.
          if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
            const Type *Ty =
              LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
            unsigned ABIAlignment =
              TLI.getTargetData()->getABITypeAlignment(Ty);
            if (LD->getAlignment() < ABIAlignment){
              Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
                                           DAG, TLI);
              Tmp1 = Result.getOperand(0);
              Tmp2 = Result.getOperand(1);
              Tmp1 = LegalizeOp(Tmp1);
              Tmp2 = LegalizeOp(Tmp2);
            }
          }
        }
        break;
      case TargetLowering::Expand:
        if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && isTypeLegal(SrcVT)) {
          SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2,
                                     LD->getPointerInfo(),
                                     LD->isVolatile(), LD->isNonTemporal(),
                                     LD->getAlignment());
          unsigned ExtendOp;
          switch (ExtType) {
          case ISD::EXTLOAD:
            ExtendOp = (SrcVT.isFloatingPoint() ?
                        ISD::FP_EXTEND : ISD::ANY_EXTEND);
            break;
          case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break;
          case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break;
          default: llvm_unreachable("Unexpected extend load type!");
          }
          Result = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
          Tmp1 = LegalizeOp(Result);  // Relegalize new nodes.
          Tmp2 = LegalizeOp(Load.getValue(1));
          break;
        }
        // FIXME: This does not work for vectors on most targets. Sign- and
        // zero-extend operations are currently folded into extending loads,
        // whether they are legal or not, and then we end up here without any
        // support for legalizing them.
        assert(ExtType != ISD::EXTLOAD &&
               "EXTLOAD should always be supported!");
        // Turn the unsupported load into an EXTLOAD followed by an explicit
        // zero/sign extend inreg.
        Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0),
                                Tmp1, Tmp2, LD->getPointerInfo(), SrcVT,
                                LD->isVolatile(), LD->isNonTemporal(),
                                LD->getAlignment());
        SDValue ValRes;
        if (ExtType == ISD::SEXTLOAD)
          ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
                               Result.getValueType(),
                               Result, DAG.getValueType(SrcVT));
        else
          ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType());
        Tmp1 = LegalizeOp(ValRes);  // Relegalize new nodes.
        Tmp2 = LegalizeOp(Result.getValue(1));  // Relegalize new nodes.
        break;
      }
    }

    // Since loads produce two values, make sure to remember that we legalized
    // both of them.
    AddLegalizedOperand(SDValue(Node, 0), Tmp1);
    AddLegalizedOperand(SDValue(Node, 1), Tmp2);
    return Op.getResNo() ?
           Tmp2 : Tmp1;
  }
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(Node);
    Tmp1 = LegalizeOp(ST->getChain());    // Legalize the chain.
    Tmp2 = LegalizeOp(ST->getBasePtr());  // Legalize the pointer.
    unsigned Alignment = ST->getAlignment();
    bool isVolatile = ST->isVolatile();
    bool isNonTemporal = ST->isNonTemporal();

    if (!ST->isTruncatingStore()) {
      if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) {
        Result = SDValue(OptStore, 0);
        break;
      }

      {
        Tmp3 = LegalizeOp(ST->getValue());
        Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
                                                Tmp1, Tmp3, Tmp2,
                                                ST->getOffset()),
                         Result.getResNo());

        EVT VT = Tmp3.getValueType();
        switch (TLI.getOperationAction(ISD::STORE, VT)) {
        default: assert(0 && "This action is not supported yet!");
        case TargetLowering::Legal:
          // If this is an unaligned store and the target doesn't support it,
          // expand it.
          if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
            const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
            unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
            if (ST->getAlignment() < ABIAlignment)
              Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
                                            DAG, TLI);
          }
          break;
        case TargetLowering::Custom:
          Tmp1 = TLI.LowerOperation(Result, DAG);
          if (Tmp1.getNode()) Result = Tmp1;
          break;
        case TargetLowering::Promote:
          assert(VT.isVector() && "Unknown legal promote case!");
          Tmp3 = DAG.getNode(ISD::BITCAST, dl,
                             TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3);
          Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
                                ST->getPointerInfo(), isVolatile,
                                isNonTemporal, Alignment);
          break;
        }
        break;
      }
    } else {
      Tmp3 = LegalizeOp(ST->getValue());

      EVT StVT = ST->getMemoryVT();
      unsigned StWidth = StVT.getSizeInBits();

      if (StWidth != StVT.getStoreSizeInBits()) {
        // Promote to a byte-sized store with upper bits zero if not
        // storing an integral number of bytes. For example, promote
        // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
        EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
                                    StVT.getStoreSizeInBits());
        Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT);
        Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                                   NVT, isVolatile, isNonTemporal, Alignment);
      } else if (StWidth & (StWidth - 1)) {
        // If not storing a power-of-2 number of bits, expand as two stores.
        assert(!StVT.isVector() && "Unsupported truncstore!");
        unsigned RoundWidth = 1 << Log2_32(StWidth);
        assert(RoundWidth < StWidth);
        unsigned ExtraWidth = StWidth - RoundWidth;
        assert(ExtraWidth < RoundWidth);
        assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
               "Store size not an integral number of bytes!");
        EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
        EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
        SDValue Lo, Hi;
        unsigned IncrementSize;

        if (TLI.isLittleEndian()) {
          // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
          // Store the bottom RoundWidth bits.
          Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                                 RoundVT,
                                 isVolatile, isNonTemporal, Alignment);

          // Store the remaining ExtraWidth bits.
1492 IncrementSize = RoundWidth / 8; 1493 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1494 DAG.getIntPtrConstant(IncrementSize)); 1495 Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3, 1496 DAG.getConstant(RoundWidth, 1497 TLI.getShiftAmountTy(Tmp3.getValueType()))); 1498 Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, 1499 ST->getPointerInfo().getWithOffset(IncrementSize), 1500 ExtraVT, isVolatile, isNonTemporal, 1501 MinAlign(Alignment, IncrementSize)); 1502 } else { 1503 // Big endian - avoid unaligned stores. 1504 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X 1505 // Store the top RoundWidth bits. 1506 Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3, 1507 DAG.getConstant(ExtraWidth, 1508 TLI.getShiftAmountTy(Tmp3.getValueType()))); 1509 Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getPointerInfo(), 1510 RoundVT, isVolatile, isNonTemporal, Alignment); 1511 1512 // Store the remaining ExtraWidth bits. 1513 IncrementSize = RoundWidth / 8; 1514 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1515 DAG.getIntPtrConstant(IncrementSize)); 1516 Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, 1517 ST->getPointerInfo().getWithOffset(IncrementSize), 1518 ExtraVT, isVolatile, isNonTemporal, 1519 MinAlign(Alignment, IncrementSize)); 1520 } 1521 1522 // The order of the stores doesn't matter. 1523 Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi); 1524 } else { 1525 if (Tmp1 != ST->getChain() || Tmp3 != ST->getValue() || 1526 Tmp2 != ST->getBasePtr()) 1527 Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), 1528 Tmp1, Tmp3, Tmp2, 1529 ST->getOffset()), 1530 Result.getResNo()); 1531 1532 switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) { 1533 default: assert(0 && "This action is not supported yet!"); 1534 case TargetLowering::Legal: 1535 // If this is an unaligned store and the target doesn't support it, 1536 // expand it. 1537 if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { 1538 const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1539 unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty); 1540 if (ST->getAlignment() < ABIAlignment) 1541 Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()), 1542 DAG, TLI); 1543 } 1544 break; 1545 case TargetLowering::Custom: 1546 Result = TLI.LowerOperation(Result, DAG); 1547 break; 1548 case Expand: 1549 // TRUNCSTORE:i16 i32 -> STORE i16 1550 assert(isTypeLegal(StVT) && "Do not know how to expand this store!"); 1551 Tmp3 = DAG.getNode(ISD::TRUNCATE, dl, StVT, Tmp3); 1552 Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 1553 isVolatile, isNonTemporal, Alignment); 1554 break; 1555 } 1556 } 1557 } 1558 break; 1559 } 1560 } 1561 assert(Result.getValueType() == Op.getValueType() && 1562 "Bad legalization!"); 1563 1564 // Make sure that the generated code is itself legal. 1565 if (Result != Op) 1566 Result = LegalizeOp(Result); 1567 1568 // Note that LegalizeOp may be reentered even from single-use nodes, which 1569 // means that we always must cache transformed nodes. 1570 AddLegalizedOperand(Op, Result); 1571 return Result; 1572} 1573 1574SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) { 1575 SDValue Vec = Op.getOperand(0); 1576 SDValue Idx = Op.getOperand(1); 1577 DebugLoc dl = Op.getDebugLoc(); 1578 // Store the value to a temporary stack slot, then LOAD the returned part. 
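  // For example, extracting element Idx from a v4i32 spills the 16-byte
  // vector, scales Idx by EltSize == 4 to form a byte offset, adds it to the
  // slot address, and loads the i32 back out; because the offset is built
  // from DAG nodes this also handles non-constant indices.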
1579 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); 1580 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, 1581 MachinePointerInfo(), false, false, 0); 1582 1583 // Add the offset to the index. 1584 unsigned EltSize = 1585 Vec.getValueType().getVectorElementType().getSizeInBits()/8; 1586 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, 1587 DAG.getConstant(EltSize, Idx.getValueType())); 1588 1589 if (Idx.getValueType().bitsGT(TLI.getPointerTy())) 1590 Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx); 1591 else 1592 Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx); 1593 1594 StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr); 1595 1596 if (Op.getValueType().isVector()) 1597 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr,MachinePointerInfo(), 1598 false, false, 0); 1599 return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr, 1600 MachinePointerInfo(), 1601 Vec.getValueType().getVectorElementType(), 1602 false, false, 0); 1603} 1604 1605SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) { 1606 assert(Op.getValueType().isVector() && "Non-vector insert subvector!"); 1607 1608 SDValue Vec = Op.getOperand(0); 1609 SDValue Part = Op.getOperand(1); 1610 SDValue Idx = Op.getOperand(2); 1611 DebugLoc dl = Op.getDebugLoc(); 1612 1613 // Store the value to a temporary stack slot, then LOAD the returned part. 1614 1615 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); 1616 int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 1617 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); 1618 1619 // First store the whole vector. 1620 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo, 1621 false, false, 0); 1622 1623 // Then store the inserted part. 1624 1625 // Add the offset to the index. 1626 unsigned EltSize = 1627 Vec.getValueType().getVectorElementType().getSizeInBits()/8; 1628 1629 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, 1630 DAG.getConstant(EltSize, Idx.getValueType())); 1631 1632 if (Idx.getValueType().bitsGT(TLI.getPointerTy())) 1633 Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx); 1634 else 1635 Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx); 1636 1637 SDValue SubStackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, 1638 StackPtr); 1639 1640 // Store the subvector. 1641 Ch = DAG.getStore(DAG.getEntryNode(), dl, Part, SubStackPtr, 1642 MachinePointerInfo(), false, false, 0); 1643 1644 // Finally, load the updated vector. 1645 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo, 1646 false, false, 0); 1647} 1648 1649SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) { 1650 // We can't handle this case efficiently. Allocate a sufficiently 1651 // aligned object on the stack, store each element into it, then load 1652 // the result as a vector. 1653 // Create the stack frame object. 1654 EVT VT = Node->getValueType(0); 1655 EVT EltVT = VT.getVectorElementType(); 1656 DebugLoc dl = Node->getDebugLoc(); 1657 SDValue FIPtr = DAG.CreateStackTemporary(VT); 1658 int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex(); 1659 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); 1660 1661 // Emit a store of each element to the stack slot. 1662 SmallVector<SDValue, 8> Stores; 1663 unsigned TypeByteSize = EltVT.getSizeInBits() / 8; 1664 // Store (in the right endianness) the elements to memory. 
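  // Operand i goes to byte offset TypeByteSize * i; e.g. a BUILD_VECTOR of
  // four i32 elements stores at offsets 0, 4, 8 and 12. Undef operands are
  // skipped entirely, which is fine since the corresponding bytes of the
  // vector load below are allowed to hold anything.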
1665 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1666 // Ignore undef elements. 1667 if (Node->getOperand(i).getOpcode() == ISD::UNDEF) continue; 1668 1669 unsigned Offset = TypeByteSize*i; 1670 1671 SDValue Idx = DAG.getConstant(Offset, FIPtr.getValueType()); 1672 Idx = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, Idx); 1673 1674 // If the destination vector element type is narrower than the source 1675 // element type, only store the bits necessary. 1676 if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) { 1677 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, 1678 Node->getOperand(i), Idx, 1679 PtrInfo.getWithOffset(Offset), 1680 EltVT, false, false, 0)); 1681 } else 1682 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, 1683 Node->getOperand(i), Idx, 1684 PtrInfo.getWithOffset(Offset), 1685 false, false, 0)); 1686 } 1687 1688 SDValue StoreChain; 1689 if (!Stores.empty()) // Not all undef elements? 1690 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1691 &Stores[0], Stores.size()); 1692 else 1693 StoreChain = DAG.getEntryNode(); 1694 1695 // Result is a load from the stack slot. 1696 return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo, false, false, 0); 1697} 1698 1699SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) { 1700 DebugLoc dl = Node->getDebugLoc(); 1701 SDValue Tmp1 = Node->getOperand(0); 1702 SDValue Tmp2 = Node->getOperand(1); 1703 1704 // Get the sign bit of the RHS. First obtain a value that has the same 1705 // sign as the sign bit, i.e. negative if and only if the sign bit is 1. 1706 SDValue SignBit; 1707 EVT FloatVT = Tmp2.getValueType(); 1708 EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits()); 1709 if (isTypeLegal(IVT)) { 1710 // Convert to an integer with the same sign bit. 1711 SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2); 1712 } else { 1713 // Store the float to memory, then load the sign part out as an integer. 1714 MVT LoadTy = TLI.getPointerTy(); 1715 // First create a temporary that is aligned for both the load and store. 1716 SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy); 1717 // Then store the float to it. 1718 SDValue Ch = 1719 DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, MachinePointerInfo(), 1720 false, false, 0); 1721 if (TLI.isBigEndian()) { 1722 assert(FloatVT.isByteSized() && "Unsupported floating point type!"); 1723 // Load out a legal integer with the same sign bit as the float. 1724 SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(), 1725 false, false, 0); 1726 } else { // Little endian 1727 SDValue LoadPtr = StackPtr; 1728 // The float may be wider than the integer we are going to load. Advance 1729 // the pointer so that the loaded integer will contain the sign bit. 1730 unsigned Strides = (FloatVT.getSizeInBits()-1)/LoadTy.getSizeInBits(); 1731 unsigned ByteOffset = (Strides * LoadTy.getSizeInBits()) / 8; 1732 LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(), 1733 LoadPtr, DAG.getIntPtrConstant(ByteOffset)); 1734 // Load a legal integer containing the sign bit. 1735 SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, MachinePointerInfo(), 1736 false, false, 0); 1737 // Move the sign bit to the top bit of the loaded integer. 
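      // For example, with FloatVT == f64 and a 32-bit LoadTy: Strides == 1,
      // ByteOffset == 4, and BitShift below is 32 - (64 - 32) == 0, so the
      // loaded word already has the sign bit on top. With FloatVT == f80 and
      // a 32-bit LoadTy: ByteOffset == 8 and BitShift == 32 - (80 - 64) == 16.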
1738 unsigned BitShift = LoadTy.getSizeInBits() - 1739 (FloatVT.getSizeInBits() - 8 * ByteOffset); 1740 assert(BitShift < LoadTy.getSizeInBits() && "Pointer advanced wrong?"); 1741 if (BitShift) 1742 SignBit = DAG.getNode(ISD::SHL, dl, LoadTy, SignBit, 1743 DAG.getConstant(BitShift, 1744 TLI.getShiftAmountTy(SignBit.getValueType()))); 1745 } 1746 } 1747 // Now get the sign bit proper, by seeing whether the value is negative. 1748 SignBit = DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()), 1749 SignBit, DAG.getConstant(0, SignBit.getValueType()), 1750 ISD::SETLT); 1751 // Get the absolute value of the result. 1752 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1); 1753 // Select between the nabs and abs value based on the sign bit of 1754 // the input. 1755 return DAG.getNode(ISD::SELECT, dl, AbsVal.getValueType(), SignBit, 1756 DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal), 1757 AbsVal); 1758} 1759 1760void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node, 1761 SmallVectorImpl<SDValue> &Results) { 1762 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore(); 1763 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and" 1764 " not tell us which reg is the stack pointer!"); 1765 DebugLoc dl = Node->getDebugLoc(); 1766 EVT VT = Node->getValueType(0); 1767 SDValue Tmp1 = SDValue(Node, 0); 1768 SDValue Tmp2 = SDValue(Node, 1); 1769 SDValue Tmp3 = Node->getOperand(2); 1770 SDValue Chain = Tmp1.getOperand(0); 1771 1772 // Chain the dynamic stack allocation so that it doesn't modify the stack 1773 // pointer when other instructions are using the stack. 1774 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true)); 1775 1776 SDValue Size = Tmp2.getOperand(1); 1777 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); 1778 Chain = SP.getValue(1); 1779 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue(); 1780 unsigned StackAlign = TM.getFrameLowering()->getStackAlignment(); 1781 if (Align > StackAlign) 1782 SP = DAG.getNode(ISD::AND, dl, VT, SP, 1783 DAG.getConstant(-(uint64_t)Align, VT)); 1784 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value 1785 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain 1786 1787 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true), 1788 DAG.getIntPtrConstant(0, true), SDValue()); 1789 1790 Results.push_back(Tmp1); 1791 Results.push_back(Tmp2); 1792} 1793 1794/// LegalizeSetCCCondCode - Legalize a SETCC with given LHS and RHS and 1795/// condition code CC on the current target. This routine expands SETCC with 1796/// illegal condition code into AND / OR of multiple SETCC values. 1797void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, 1798 SDValue &LHS, SDValue &RHS, 1799 SDValue &CC, 1800 DebugLoc dl) { 1801 EVT OpVT = LHS.getValueType(); 1802 ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get(); 1803 switch (TLI.getCondCodeAction(CCCode, OpVT)) { 1804 default: assert(0 && "Unknown condition code action!"); 1805 case TargetLowering::Legal: 1806 // Nothing to do. 
1807 break; 1808 case TargetLowering::Expand: { 1809 ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID; 1810 unsigned Opc = 0; 1811 switch (CCCode) { 1812 default: assert(0 && "Don't know how to expand this condition!"); 1813 case ISD::SETOEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETO; Opc = ISD::AND; break; 1814 case ISD::SETOGT: CC1 = ISD::SETGT; CC2 = ISD::SETO; Opc = ISD::AND; break; 1815 case ISD::SETOGE: CC1 = ISD::SETGE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1816 case ISD::SETOLT: CC1 = ISD::SETLT; CC2 = ISD::SETO; Opc = ISD::AND; break; 1817 case ISD::SETOLE: CC1 = ISD::SETLE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1818 case ISD::SETONE: CC1 = ISD::SETNE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1819 case ISD::SETUEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1820 case ISD::SETUGT: CC1 = ISD::SETGT; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1821 case ISD::SETUGE: CC1 = ISD::SETGE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1822 case ISD::SETULT: CC1 = ISD::SETLT; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1823 case ISD::SETULE: CC1 = ISD::SETLE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1824 case ISD::SETUNE: CC1 = ISD::SETNE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1825 // FIXME: Implement more expansions. 1826 } 1827 1828 SDValue SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1); 1829 SDValue SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2); 1830 LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2); 1831 RHS = SDValue(); 1832 CC = SDValue(); 1833 break; 1834 } 1835 } 1836} 1837 1838/// EmitStackConvert - Emit a store/load combination to the stack. This stores 1839/// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does 1840/// a load from the stack slot to DestVT, extending it if needed. 1841/// The resultant code need not be legal. 1842SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, 1843 EVT SlotVT, 1844 EVT DestVT, 1845 DebugLoc dl) { 1846 // Create the stack frame object. 1847 unsigned SrcAlign = 1848 TLI.getTargetData()->getPrefTypeAlignment(SrcOp.getValueType(). 1849 getTypeForEVT(*DAG.getContext())); 1850 SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign); 1851 1852 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr); 1853 int SPFI = StackPtrFI->getIndex(); 1854 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI); 1855 1856 unsigned SrcSize = SrcOp.getValueType().getSizeInBits(); 1857 unsigned SlotSize = SlotVT.getSizeInBits(); 1858 unsigned DestSize = DestVT.getSizeInBits(); 1859 const Type *DestType = DestVT.getTypeForEVT(*DAG.getContext()); 1860 unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(DestType); 1861 1862 // Emit a store to the stack slot. Use a truncstore if the input value is 1863 // later than DestVT. 1864 SDValue Store; 1865 1866 if (SrcSize > SlotSize) 1867 Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, 1868 PtrInfo, SlotVT, false, false, SrcAlign); 1869 else { 1870 assert(SrcSize == SlotSize && "Invalid store"); 1871 Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, 1872 PtrInfo, false, false, SrcAlign); 1873 } 1874 1875 // Result is a load from the stack slot. 
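  // If SlotVT already matches DestVT this is a plain load; otherwise the
  // narrower slot value is widened with an EXTLOAD. For instance, an
  // FP_ROUND_INREG of f64 to f32 truncstores an f32 above and extloads it
  // back out here as f64 (SlotSize 32 < DestSize 64).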
1876 if (SlotSize == DestSize) 1877 return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo, 1878 false, false, DestAlign); 1879 1880 assert(SlotSize < DestSize && "Unknown extension!"); 1881 return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, 1882 PtrInfo, SlotVT, false, false, DestAlign); 1883} 1884 1885SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) { 1886 DebugLoc dl = Node->getDebugLoc(); 1887 // Create a vector sized/aligned stack slot, store the value to element #0, 1888 // then load the whole vector back out. 1889 SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0)); 1890 1891 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr); 1892 int SPFI = StackPtrFI->getIndex(); 1893 1894 SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0), 1895 StackPtr, 1896 MachinePointerInfo::getFixedStack(SPFI), 1897 Node->getValueType(0).getVectorElementType(), 1898 false, false, 0); 1899 return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr, 1900 MachinePointerInfo::getFixedStack(SPFI), 1901 false, false, 0); 1902} 1903 1904 1905/// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't 1906/// support the operation, but do support the resultant vector type. 1907SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { 1908 unsigned NumElems = Node->getNumOperands(); 1909 SDValue Value1, Value2; 1910 DebugLoc dl = Node->getDebugLoc(); 1911 EVT VT = Node->getValueType(0); 1912 EVT OpVT = Node->getOperand(0).getValueType(); 1913 EVT EltVT = VT.getVectorElementType(); 1914 1915 // If the only non-undef value is the low element, turn this into a 1916 // SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X. 1917 bool isOnlyLowElement = true; 1918 bool MoreThanTwoValues = false; 1919 bool isConstant = true; 1920 for (unsigned i = 0; i < NumElems; ++i) { 1921 SDValue V = Node->getOperand(i); 1922 if (V.getOpcode() == ISD::UNDEF) 1923 continue; 1924 if (i > 0) 1925 isOnlyLowElement = false; 1926 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 1927 isConstant = false; 1928 1929 if (!Value1.getNode()) { 1930 Value1 = V; 1931 } else if (!Value2.getNode()) { 1932 if (V != Value1) 1933 Value2 = V; 1934 } else if (V != Value1 && V != Value2) { 1935 MoreThanTwoValues = true; 1936 } 1937 } 1938 1939 if (!Value1.getNode()) 1940 return DAG.getUNDEF(VT); 1941 1942 if (isOnlyLowElement) 1943 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0)); 1944 1945 // If all elements are constants, create a load from the constant pool. 1946 if (isConstant) { 1947 std::vector<Constant*> CV; 1948 for (unsigned i = 0, e = NumElems; i != e; ++i) { 1949 if (ConstantFPSDNode *V = 1950 dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) { 1951 CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue())); 1952 } else if (ConstantSDNode *V = 1953 dyn_cast<ConstantSDNode>(Node->getOperand(i))) { 1954 if (OpVT==EltVT) 1955 CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue())); 1956 else { 1957 // If OpVT and EltVT don't match, EltVT is not legal and the 1958 // element values have been promoted/truncated earlier. Undo this; 1959 // we don't want a v16i8 to become a v16i32 for example. 
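        // For example, if EltVT is i8 but the operands were promoted to
        // i32, ConstantInt::get below rebuilds an i8 constant from the low
        // 8 bits of the promoted value.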
1960 const ConstantInt *CI = V->getConstantIntValue(); 1961 CV.push_back(ConstantInt::get(EltVT.getTypeForEVT(*DAG.getContext()), 1962 CI->getZExtValue())); 1963 } 1964 } else { 1965 assert(Node->getOperand(i).getOpcode() == ISD::UNDEF); 1966 const Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext()); 1967 CV.push_back(UndefValue::get(OpNTy)); 1968 } 1969 } 1970 Constant *CP = ConstantVector::get(CV); 1971 SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy()); 1972 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 1973 return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 1974 MachinePointerInfo::getConstantPool(), 1975 false, false, Alignment); 1976 } 1977 1978 if (!MoreThanTwoValues) { 1979 SmallVector<int, 8> ShuffleVec(NumElems, -1); 1980 for (unsigned i = 0; i < NumElems; ++i) { 1981 SDValue V = Node->getOperand(i); 1982 if (V.getOpcode() == ISD::UNDEF) 1983 continue; 1984 ShuffleVec[i] = V == Value1 ? 0 : NumElems; 1985 } 1986 if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) { 1987 // Get the splatted value into the low element of a vector register. 1988 SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1); 1989 SDValue Vec2; 1990 if (Value2.getNode()) 1991 Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2); 1992 else 1993 Vec2 = DAG.getUNDEF(VT); 1994 1995 // Return shuffle(LowValVec, undef, <0,0,0,0>) 1996 return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data()); 1997 } 1998 } 1999 2000 // Otherwise, we can't handle this case efficiently. 2001 return ExpandVectorBuildThroughStack(Node); 2002} 2003 2004// ExpandLibCall - Expand a node into a call to a libcall. If the result value 2005// does not fit into a register, return the lo part and set the hi part to the 2006// by-reg argument. If it does fit into a single register, return the result 2007// and leave the Hi part unset. 2008SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, 2009 bool isSigned) { 2010 assert(!IsLegalizingCall && "Cannot overlap legalization of calls!"); 2011 // The input chain to this libcall is the entry node of the function. 2012 // Legalizing the call will automatically add the previous call to the 2013 // dependence. 2014 SDValue InChain = DAG.getEntryNode(); 2015 2016 TargetLowering::ArgListTy Args; 2017 TargetLowering::ArgListEntry Entry; 2018 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 2019 EVT ArgVT = Node->getOperand(i).getValueType(); 2020 const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2021 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 2022 Entry.isSExt = isSigned; 2023 Entry.isZExt = !isSigned; 2024 Args.push_back(Entry); 2025 } 2026 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2027 TLI.getPointerTy()); 2028 2029 // Splice the libcall in wherever FindInputOutputChains tells us to. 2030 const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 2031 2032 // isTailCall may be true since the callee does not reference caller stack 2033 // frame. Check if it's in the right position. 2034 bool isTailCall = isInTailCallPosition(DAG, Node, TLI); 2035 std::pair<SDValue, SDValue> CallInfo = 2036 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 2037 0, TLI.getLibcallCallingConv(LC), isTailCall, 2038 /*isReturnValueUsed=*/true, 2039 Callee, Args, DAG, Node->getDebugLoc()); 2040 2041 if (!CallInfo.second.getNode()) 2042 // It's a tailcall, return the chain (which is the DAG root). 
2043 return DAG.getRoot(); 2044 2045 // Legalize the call sequence, starting with the chain. This will advance 2046 // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that 2047 // was added by LowerCallTo (guaranteeing proper serialization of calls). 2048 LegalizeOp(CallInfo.second); 2049 return CallInfo.first; 2050} 2051 2052// ExpandChainLibCall - Expand a node into a call to a libcall. Similar to 2053// ExpandLibCall except that the first operand is the in-chain. 2054std::pair<SDValue, SDValue> 2055SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC, 2056 SDNode *Node, 2057 bool isSigned) { 2058 assert(!IsLegalizingCall && "Cannot overlap legalization of calls!"); 2059 SDValue InChain = Node->getOperand(0); 2060 2061 TargetLowering::ArgListTy Args; 2062 TargetLowering::ArgListEntry Entry; 2063 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) { 2064 EVT ArgVT = Node->getOperand(i).getValueType(); 2065 const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2066 Entry.Node = Node->getOperand(i); 2067 Entry.Ty = ArgTy; 2068 Entry.isSExt = isSigned; 2069 Entry.isZExt = !isSigned; 2070 Args.push_back(Entry); 2071 } 2072 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2073 TLI.getPointerTy()); 2074 2075 // Splice the libcall in wherever FindInputOutputChains tells us to. 2076 const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 2077 std::pair<SDValue, SDValue> CallInfo = 2078 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 2079 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false, 2080 /*isReturnValueUsed=*/true, 2081 Callee, Args, DAG, Node->getDebugLoc()); 2082 2083 // Legalize the call sequence, starting with the chain. This will advance 2084 // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that 2085 // was added by LowerCallTo (guaranteeing proper serialization of calls). 2086 LegalizeOp(CallInfo.second); 2087 return CallInfo; 2088} 2089 2090SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node, 2091 RTLIB::Libcall Call_F32, 2092 RTLIB::Libcall Call_F64, 2093 RTLIB::Libcall Call_F80, 2094 RTLIB::Libcall Call_PPCF128) { 2095 RTLIB::Libcall LC; 2096 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 2097 default: assert(0 && "Unexpected request for libcall!"); 2098 case MVT::f32: LC = Call_F32; break; 2099 case MVT::f64: LC = Call_F64; break; 2100 case MVT::f80: LC = Call_F80; break; 2101 case MVT::ppcf128: LC = Call_PPCF128; break; 2102 } 2103 return ExpandLibCall(LC, Node, false); 2104} 2105 2106SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned, 2107 RTLIB::Libcall Call_I8, 2108 RTLIB::Libcall Call_I16, 2109 RTLIB::Libcall Call_I32, 2110 RTLIB::Libcall Call_I64, 2111 RTLIB::Libcall Call_I128) { 2112 RTLIB::Libcall LC; 2113 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 2114 default: assert(0 && "Unexpected request for libcall!"); 2115 case MVT::i8: LC = Call_I8; break; 2116 case MVT::i16: LC = Call_I16; break; 2117 case MVT::i32: LC = Call_I32; break; 2118 case MVT::i64: LC = Call_I64; break; 2119 case MVT::i128: LC = Call_I128; break; 2120 } 2121 return ExpandLibCall(LC, Node, isSigned); 2122} 2123 2124/// ExpandDivRemLibCall - Issue libcalls to __{u}divmod to compute div / rem 2125/// pairs. 
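/// For example, when both the quotient and the remainder of the same i32
/// operands are used, a single call to the target's registered divrem routine
/// (for instance compiler-rt's __divmodsi4, where a target provides that
/// name) yields the quotient as the call result and writes the remainder
/// through a stack temporary passed as an extra pointer argument.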
2126SDValue SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node, bool isSigned, 2127 bool isDIV) { 2128 RTLIB::Libcall LC; 2129 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 2130 default: assert(0 && "Unexpected request for libcall!"); 2131 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 2132 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 2133 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 2134 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 2135 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 2136 } 2137 2138 if (!TLI.getLibcallName(LC)) 2139 return SDValue(); 2140 2141 // Only issue divrem libcall if both quotient and remainder are needed. 2142 unsigned OtherOpcode = 0; 2143 if (isSigned) { 2144 OtherOpcode = isDIV ? ISD::SREM : ISD::SDIV; 2145 } else { 2146 OtherOpcode = isDIV ? ISD::UREM : ISD::UDIV; 2147 } 2148 SDNode *OtherNode = 0; 2149 SDValue Op0 = Node->getOperand(0); 2150 SDValue Op1 = Node->getOperand(1); 2151 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(), 2152 UE = Op0.getNode()->use_end(); UI != UE; ++UI) { 2153 SDNode *User = *UI; 2154 if (User == Node) 2155 continue; 2156 if (User->getOpcode() == OtherOpcode && 2157 User->getOperand(0) == Op0 && 2158 User->getOperand(1) == Op1) { 2159 OtherNode = User; 2160 break; 2161 } 2162 } 2163 if (!OtherNode) 2164 return SDValue(); 2165 2166 // If the libcall is already generated, no need to issue it again. 2167 DenseMap<SDValue, SDValue>::iterator I 2168 = LegalizedNodes.find(SDValue(OtherNode,0)); 2169 if (I != LegalizedNodes.end()) { 2170 OtherNode = I->second.getNode(); 2171 SDNode *Chain = OtherNode->getOperand(0).getNode(); 2172 for (SDNode::use_iterator UI = Chain->use_begin(), UE = Chain->use_end(); 2173 UI != UE; ++UI) { 2174 SDNode *User = *UI; 2175 if (User == OtherNode) 2176 continue; 2177 if (isDIV) { 2178 assert(User->getOpcode() == ISD::CopyFromReg); 2179 } else { 2180 assert(User->getOpcode() == ISD::LOAD); 2181 } 2182 return SDValue(User, 0); 2183 } 2184 } 2185 2186 // The input chain to this libcall is the entry node of the function. 2187 // Legalizing the call will automatically add the previous call to the 2188 // dependence. 2189 SDValue InChain = DAG.getEntryNode(); 2190 2191 EVT RetVT = Node->getValueType(0); 2192 const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 2193 2194 TargetLowering::ArgListTy Args; 2195 TargetLowering::ArgListEntry Entry; 2196 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 2197 EVT ArgVT = Node->getOperand(i).getValueType(); 2198 const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2199 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 2200 Entry.isSExt = isSigned; 2201 Entry.isZExt = !isSigned; 2202 Args.push_back(Entry); 2203 } 2204 2205 // Also pass the return address of the remainder. 2206 SDValue FIPtr = DAG.CreateStackTemporary(RetVT); 2207 Entry.Node = FIPtr; 2208 Entry.Ty = RetTy->getPointerTo(); 2209 Entry.isSExt = isSigned; 2210 Entry.isZExt = !isSigned; 2211 Args.push_back(Entry); 2212 2213 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2214 TLI.getPointerTy()); 2215 2216 // Splice the libcall in wherever FindInputOutputChains tells us to. 
2217 DebugLoc dl = Node->getDebugLoc(); 2218 std::pair<SDValue, SDValue> CallInfo = 2219 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 2220 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false, 2221 /*isReturnValueUsed=*/true, Callee, Args, DAG, dl); 2222 2223 // Legalize the call sequence, starting with the chain. This will advance 2224 // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that 2225 // was added by LowerCallTo (guaranteeing proper serialization of calls). 2226 LegalizeOp(CallInfo.second); 2227 2228 // Remainder is loaded back from the stack frame. 2229 SDValue Rem = DAG.getLoad(RetVT, dl, LastCALLSEQ_END, FIPtr, 2230 MachinePointerInfo(), false, false, 0); 2231 return isDIV ? CallInfo.first : Rem; 2232} 2233 2234/// ExpandLegalINT_TO_FP - This function is responsible for legalizing a 2235/// INT_TO_FP operation of the specified operand when the target requests that 2236/// we expand it. At this point, we know that the result and operand types are 2237/// legal for the target. 2238SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, 2239 SDValue Op0, 2240 EVT DestVT, 2241 DebugLoc dl) { 2242 if (Op0.getValueType() == MVT::i32) { 2243 // simple 32-bit [signed|unsigned] integer to float/double expansion 2244 2245 // Get the stack frame index of a 8 byte buffer. 2246 SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64); 2247 2248 // word offset constant for Hi/Lo address computation 2249 SDValue WordOff = DAG.getConstant(sizeof(int), TLI.getPointerTy()); 2250 // set up Hi and Lo (into buffer) address based on endian 2251 SDValue Hi = StackSlot; 2252 SDValue Lo = DAG.getNode(ISD::ADD, dl, 2253 TLI.getPointerTy(), StackSlot, WordOff); 2254 if (TLI.isLittleEndian()) 2255 std::swap(Hi, Lo); 2256 2257 // if signed map to unsigned space 2258 SDValue Op0Mapped; 2259 if (isSigned) { 2260 // constant used to invert sign bit (signed to unsigned mapping) 2261 SDValue SignBit = DAG.getConstant(0x80000000u, MVT::i32); 2262 Op0Mapped = DAG.getNode(ISD::XOR, dl, MVT::i32, Op0, SignBit); 2263 } else { 2264 Op0Mapped = Op0; 2265 } 2266 // store the lo of the constructed double - based on integer input 2267 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, 2268 Op0Mapped, Lo, MachinePointerInfo(), 2269 false, false, 0); 2270 // initial hi portion of constructed double 2271 SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32); 2272 // store the hi of the constructed double - biased exponent 2273 SDValue Store2 = DAG.getStore(Store1, dl, InitialHi, Hi, 2274 MachinePointerInfo(), 2275 false, false, 0); 2276 // load the constructed double 2277 SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot, 2278 MachinePointerInfo(), false, false, 0); 2279 // FP constant to bias correct the final result 2280 SDValue Bias = DAG.getConstantFP(isSigned ? 
2281 BitsToDouble(0x4330000080000000ULL) : 2282 BitsToDouble(0x4330000000000000ULL), 2283 MVT::f64); 2284 // subtract the bias 2285 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias); 2286 // final result 2287 SDValue Result; 2288 // handle final rounding 2289 if (DestVT == MVT::f64) { 2290 // do nothing 2291 Result = Sub; 2292 } else if (DestVT.bitsLT(MVT::f64)) { 2293 Result = DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 2294 DAG.getIntPtrConstant(0)); 2295 } else if (DestVT.bitsGT(MVT::f64)) { 2296 Result = DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 2297 } 2298 return Result; 2299 } 2300 assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet"); 2301 // Code below here assumes !isSigned without checking again. 2302 2303 // Implementation of unsigned i64 to f64 following the algorithm in 2304 // __floatundidf in compiler_rt. This implementation has the advantage 2305 // of performing rounding correctly, both in the default rounding mode 2306 // and in all alternate rounding modes. 2307 // TODO: Generalize this for use with other types. 2308 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f64) { 2309 SDValue TwoP52 = 2310 DAG.getConstant(UINT64_C(0x4330000000000000), MVT::i64); 2311 SDValue TwoP84PlusTwoP52 = 2312 DAG.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), MVT::f64); 2313 SDValue TwoP84 = 2314 DAG.getConstant(UINT64_C(0x4530000000000000), MVT::i64); 2315 2316 SDValue Lo = DAG.getZeroExtendInReg(Op0, dl, MVT::i32); 2317 SDValue Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, 2318 DAG.getConstant(32, MVT::i64)); 2319 SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52); 2320 SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84); 2321 SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr); 2322 SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr); 2323 SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt, 2324 TwoP84PlusTwoP52); 2325 return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub); 2326 } 2327 2328 // Implementation of unsigned i64 to f32. 2329 // TODO: Generalize this for use with other types. 2330 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) { 2331 // For unsigned conversions, convert them to signed conversions using the 2332 // algorithm from the x86_64 __floatundidf in compiler_rt. 2333 if (!isSigned) { 2334 SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0); 2335 2336 SDValue ShiftConst = 2337 DAG.getConstant(1, TLI.getShiftAmountTy(Op0.getValueType())); 2338 SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst); 2339 SDValue AndConst = DAG.getConstant(1, MVT::i64); 2340 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst); 2341 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr); 2342 2343 SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or); 2344 SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt); 2345 2346 // TODO: This really should be implemented using a branch rather than a 2347 // select. We happen to get lucky and machinesink does the right 2348 // thing most of the time. This would be a good candidate for a 2349 //pseudo-op, or, even better, for whole-function isel. 2350 SDValue SignBitTest = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2351 Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT); 2352 return DAG.getNode(ISD::SELECT, dl, MVT::f32, SignBitTest, Slow, Fast); 2353 } 2354 2355 // Otherwise, implement the fully general conversion. 
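    // Roughly: values below 2^53 convert to f64 exactly and pass through
    // unchanged; for larger values, any set bits among the low 11 are folded
    // into bit 11 as a sticky bit so that rounding through f64 and then to
    // f32 at the end matches a single correctly rounded conversion. The
    // adjusted value is split into 32-bit halves, each converted exactly to
    // f64, recombined as hi * 2^32 + lo, and rounded to f32 at the end.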
2356 2357 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2358 DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64)); 2359 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, 2360 DAG.getConstant(UINT64_C(0x800), MVT::i64)); 2361 SDValue And2 = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2362 DAG.getConstant(UINT64_C(0x7ff), MVT::i64)); 2363 SDValue Ne = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2364 And2, DAG.getConstant(UINT64_C(0), MVT::i64), ISD::SETNE); 2365 SDValue Sel = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ne, Or, Op0); 2366 SDValue Ge = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2367 Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64), 2368 ISD::SETUGE); 2369 SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0); 2370 EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType()); 2371 2372 SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2, 2373 DAG.getConstant(32, SHVT)); 2374 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh); 2375 SDValue Fcvt = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Trunc); 2376 SDValue TwoP32 = 2377 DAG.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), MVT::f64); 2378 SDValue Fmul = DAG.getNode(ISD::FMUL, dl, MVT::f64, TwoP32, Fcvt); 2379 SDValue Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sel2); 2380 SDValue Fcvt2 = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Lo); 2381 SDValue Fadd = DAG.getNode(ISD::FADD, dl, MVT::f64, Fmul, Fcvt2); 2382 return DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Fadd, 2383 DAG.getIntPtrConstant(0)); 2384 } 2385 2386 SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0); 2387 2388 SDValue SignSet = DAG.getSetCC(dl, TLI.getSetCCResultType(Op0.getValueType()), 2389 Op0, DAG.getConstant(0, Op0.getValueType()), 2390 ISD::SETLT); 2391 SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4); 2392 SDValue CstOffset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), 2393 SignSet, Four, Zero); 2394 2395 // If the sign bit of the integer is set, the large number will be treated 2396 // as a negative number. To counteract this, the dynamic code adds an 2397 // offset depending on the data type. 
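  // The pool constant is an i64 holding { 0.0f, 2^N as float } arranged so
  // that offset 0 loads 0.0 and offset 4 loads 2^N on either endianness.
  // E.g. for an i32 input of 0xFFFFFFFF the SINT_TO_FP above produced -1.0,
  // and adding the selected 2^32 fudge factor gives 4294967295.0.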
2398 uint64_t FF; 2399 switch (Op0.getValueType().getSimpleVT().SimpleTy) { 2400 default: assert(0 && "Unsupported integer type!"); 2401 case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float) 2402 case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float) 2403 case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float) 2404 case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float) 2405 } 2406 if (TLI.isLittleEndian()) FF <<= 32; 2407 Constant *FudgeFactor = ConstantInt::get( 2408 Type::getInt64Ty(*DAG.getContext()), FF); 2409 2410 SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy()); 2411 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 2412 CPIdx = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), CPIdx, CstOffset); 2413 Alignment = std::min(Alignment, 4u); 2414 SDValue FudgeInReg; 2415 if (DestVT == MVT::f32) 2416 FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx, 2417 MachinePointerInfo::getConstantPool(), 2418 false, false, Alignment); 2419 else { 2420 FudgeInReg = 2421 LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, 2422 DAG.getEntryNode(), CPIdx, 2423 MachinePointerInfo::getConstantPool(), 2424 MVT::f32, false, false, Alignment)); 2425 } 2426 2427 return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg); 2428} 2429 2430/// PromoteLegalINT_TO_FP - This function is responsible for legalizing a 2431/// *INT_TO_FP operation of the specified operand when the target requests that 2432/// we promote it. At this point, we know that the result and operand types are 2433/// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP 2434/// operation that takes a larger input. 2435SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp, 2436 EVT DestVT, 2437 bool isSigned, 2438 DebugLoc dl) { 2439 // First step, figure out the appropriate *INT_TO_FP operation to use. 2440 EVT NewInTy = LegalOp.getValueType(); 2441 2442 unsigned OpToUse = 0; 2443 2444 // Scan for the appropriate larger type to use. 2445 while (1) { 2446 NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1); 2447 assert(NewInTy.isInteger() && "Ran out of possibilities!"); 2448 2449 // If the target supports SINT_TO_FP of this type, use it. 2450 if (TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, NewInTy)) { 2451 OpToUse = ISD::SINT_TO_FP; 2452 break; 2453 } 2454 if (isSigned) continue; 2455 2456 // If the target supports UINT_TO_FP of this type, use it. 2457 if (TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, NewInTy)) { 2458 OpToUse = ISD::UINT_TO_FP; 2459 break; 2460 } 2461 2462 // Otherwise, try a larger type. 2463 } 2464 2465 // Okay, we found the operation and type to use. Zero extend our input to the 2466 // desired type then run the operation on it. 2467 return DAG.getNode(OpToUse, dl, DestVT, 2468 DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, 2469 dl, NewInTy, LegalOp)); 2470} 2471 2472/// PromoteLegalFP_TO_INT - This function is responsible for legalizing a 2473/// FP_TO_*INT operation of the specified operand when the target requests that 2474/// we promote it. At this point, we know that the result and operand types are 2475/// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT 2476/// operation that returns a larger result. 2477SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp, 2478 EVT DestVT, 2479 bool isSigned, 2480 DebugLoc dl) { 2481 // First step, figure out the appropriate FP_TO*INT operation to use. 
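  // For example, a target with no legal FP_TO_UINT:i32 but a legal
  // FP_TO_SINT:i64 handles FP_TO_UINT f64 -> i32 by converting to i64 with
  // FP_TO_SINT and truncating the result.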
2482 EVT NewOutTy = DestVT; 2483 2484 unsigned OpToUse = 0; 2485 2486 // Scan for the appropriate larger type to use. 2487 while (1) { 2488 NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1); 2489 assert(NewOutTy.isInteger() && "Ran out of possibilities!"); 2490 2491 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) { 2492 OpToUse = ISD::FP_TO_SINT; 2493 break; 2494 } 2495 2496 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) { 2497 OpToUse = ISD::FP_TO_UINT; 2498 break; 2499 } 2500 2501 // Otherwise, try a larger type. 2502 } 2503 2504 2505 // Okay, we found the operation and type to use. 2506 SDValue Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp); 2507 2508 // Truncate the result of the extended FP_TO_*INT operation to the desired 2509 // size. 2510 return DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation); 2511} 2512 2513/// ExpandBSWAP - Open code the operations for BSWAP of the specified operation. 2514/// 2515SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) { 2516 EVT VT = Op.getValueType(); 2517 EVT SHVT = TLI.getShiftAmountTy(VT); 2518 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; 2519 switch (VT.getSimpleVT().SimpleTy) { 2520 default: assert(0 && "Unhandled Expand type in BSWAP!"); 2521 case MVT::i16: 2522 Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2523 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2524 return DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2525 case MVT::i32: 2526 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2527 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2528 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2529 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2530 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(0xFF0000, VT)); 2531 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, VT)); 2532 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2533 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2534 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 2535 case MVT::i64: 2536 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2537 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2538 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2539 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2540 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2541 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2542 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2543 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2544 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, DAG.getConstant(255ULL<<48, VT)); 2545 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, DAG.getConstant(255ULL<<40, VT)); 2546 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, DAG.getConstant(255ULL<<32, VT)); 2547 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, DAG.getConstant(255ULL<<24, VT)); 2548 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(255ULL<<16, VT)); 2549 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(255ULL<<8 , VT)); 2550 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7); 2551 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5); 2552 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2553 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2554 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6); 2555 Tmp4 = DAG.getNode(ISD::OR, dl, VT, 
Tmp4, Tmp2); 2556 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4); 2557 } 2558} 2559 2560/// SplatByte - Distribute ByteVal over NumBits bits. 2561// FIXME: Move this helper to a common place. 2562static APInt SplatByte(unsigned NumBits, uint8_t ByteVal) { 2563 APInt Val = APInt(NumBits, ByteVal); 2564 unsigned Shift = 8; 2565 for (unsigned i = NumBits; i > 8; i >>= 1) { 2566 Val = (Val << Shift) | Val; 2567 Shift <<= 1; 2568 } 2569 return Val; 2570} 2571 2572/// ExpandBitCount - Expand the specified bitcount instruction into operations. 2573/// 2574SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op, 2575 DebugLoc dl) { 2576 switch (Opc) { 2577 default: assert(0 && "Cannot expand this yet!"); 2578 case ISD::CTPOP: { 2579 EVT VT = Op.getValueType(); 2580 EVT ShVT = TLI.getShiftAmountTy(VT); 2581 unsigned Len = VT.getSizeInBits(); 2582 2583 assert(VT.isInteger() && Len <= 128 && Len % 8 == 0 && 2584 "CTPOP not implemented for this type."); 2585 2586 // This is the "best" algorithm from 2587 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 2588 2589 SDValue Mask55 = DAG.getConstant(SplatByte(Len, 0x55), VT); 2590 SDValue Mask33 = DAG.getConstant(SplatByte(Len, 0x33), VT); 2591 SDValue Mask0F = DAG.getConstant(SplatByte(Len, 0x0F), VT); 2592 SDValue Mask01 = DAG.getConstant(SplatByte(Len, 0x01), VT); 2593 2594 // v = v - ((v >> 1) & 0x55555555...) 2595 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 2596 DAG.getNode(ISD::AND, dl, VT, 2597 DAG.getNode(ISD::SRL, dl, VT, Op, 2598 DAG.getConstant(1, ShVT)), 2599 Mask55)); 2600 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 2601 Op = DAG.getNode(ISD::ADD, dl, VT, 2602 DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 2603 DAG.getNode(ISD::AND, dl, VT, 2604 DAG.getNode(ISD::SRL, dl, VT, Op, 2605 DAG.getConstant(2, ShVT)), 2606 Mask33)); 2607 // v = (v + (v >> 4)) & 0x0F0F0F0F... 2608 Op = DAG.getNode(ISD::AND, dl, VT, 2609 DAG.getNode(ISD::ADD, dl, VT, Op, 2610 DAG.getNode(ISD::SRL, dl, VT, Op, 2611 DAG.getConstant(4, ShVT))), 2612 Mask0F); 2613 // v = (v * 0x01010101...) >> (Len - 8) 2614 Op = DAG.getNode(ISD::SRL, dl, VT, 2615 DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 2616 DAG.getConstant(Len - 8, ShVT)); 2617 2618 return Op; 2619 } 2620 case ISD::CTLZ: { 2621 // for now, we do this: 2622 // x = x | (x >> 1); 2623 // x = x | (x >> 2); 2624 // ... 2625 // x = x | (x >>16); 2626 // x = x | (x >>32); // for 64-bit input 2627 // return popcount(~x); 2628 // 2629 // but see also: http://www.hackersdelight.org/HDcode/nlz.cc 2630 EVT VT = Op.getValueType(); 2631 EVT ShVT = TLI.getShiftAmountTy(VT); 2632 unsigned len = VT.getSizeInBits(); 2633 for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { 2634 SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT); 2635 Op = DAG.getNode(ISD::OR, dl, VT, Op, 2636 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3)); 2637 } 2638 Op = DAG.getNOT(dl, Op, VT); 2639 return DAG.getNode(ISD::CTPOP, dl, VT, Op); 2640 } 2641 case ISD::CTTZ: { 2642 // for now, we use: { return popcount(~x & (x - 1)); } 2643 // unless the target has ctlz but not ctpop, in which case we use: 2644 // { return 32 - nlz(~x & (x-1)); } 2645 // see also http://www.hackersdelight.org/HDcode/ntz.cc 2646 EVT VT = Op.getValueType(); 2647 SDValue Tmp3 = DAG.getNode(ISD::AND, dl, VT, 2648 DAG.getNOT(dl, Op, VT), 2649 DAG.getNode(ISD::SUB, dl, VT, Op, 2650 DAG.getConstant(1, VT))); 2651 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 
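    // For example, x == 0b01011000: ~x & (x - 1) == 0b00000111, whose
    // population count (3) equals the number of trailing zeros; via the CTLZ
    // form below this is 8 - ctlz(0b00000111) == 3 for an i8.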
2652 if (!TLI.isOperationLegalOrCustom(ISD::CTPOP, VT) && 2653 TLI.isOperationLegalOrCustom(ISD::CTLZ, VT)) 2654 return DAG.getNode(ISD::SUB, dl, VT, 2655 DAG.getConstant(VT.getSizeInBits(), VT), 2656 DAG.getNode(ISD::CTLZ, dl, VT, Tmp3)); 2657 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp3); 2658 } 2659 } 2660} 2661 2662std::pair <SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) { 2663 unsigned Opc = Node->getOpcode(); 2664 MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT(); 2665 RTLIB::Libcall LC; 2666 2667 switch (Opc) { 2668 default: 2669 llvm_unreachable("Unhandled atomic intrinsic Expand!"); 2670 break; 2671 case ISD::ATOMIC_SWAP: 2672 switch (VT.SimpleTy) { 2673 default: llvm_unreachable("Unexpected value type for atomic!"); 2674 case MVT::i8: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_1; break; 2675 case MVT::i16: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_2; break; 2676 case MVT::i32: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_4; break; 2677 case MVT::i64: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_8; break; 2678 } 2679 break; 2680 case ISD::ATOMIC_CMP_SWAP: 2681 switch (VT.SimpleTy) { 2682 default: llvm_unreachable("Unexpected value type for atomic!"); 2683 case MVT::i8: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1; break; 2684 case MVT::i16: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2; break; 2685 case MVT::i32: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4; break; 2686 case MVT::i64: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8; break; 2687 } 2688 break; 2689 case ISD::ATOMIC_LOAD_ADD: 2690 switch (VT.SimpleTy) { 2691 default: llvm_unreachable("Unexpected value type for atomic!"); 2692 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_ADD_1; break; 2693 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_ADD_2; break; 2694 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_ADD_4; break; 2695 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_ADD_8; break; 2696 } 2697 break; 2698 case ISD::ATOMIC_LOAD_SUB: 2699 switch (VT.SimpleTy) { 2700 default: llvm_unreachable("Unexpected value type for atomic!"); 2701 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_SUB_1; break; 2702 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_SUB_2; break; 2703 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_SUB_4; break; 2704 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_SUB_8; break; 2705 } 2706 break; 2707 case ISD::ATOMIC_LOAD_AND: 2708 switch (VT.SimpleTy) { 2709 default: llvm_unreachable("Unexpected value type for atomic!"); 2710 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_AND_1; break; 2711 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_AND_2; break; 2712 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_AND_4; break; 2713 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_AND_8; break; 2714 } 2715 break; 2716 case ISD::ATOMIC_LOAD_OR: 2717 switch (VT.SimpleTy) { 2718 default: llvm_unreachable("Unexpected value type for atomic!"); 2719 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_OR_1; break; 2720 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_OR_2; break; 2721 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_OR_4; break; 2722 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_OR_8; break; 2723 } 2724 break; 2725 case ISD::ATOMIC_LOAD_XOR: 2726 switch (VT.SimpleTy) { 2727 default: llvm_unreachable("Unexpected value type for atomic!"); 2728 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_XOR_1; break; 2729 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_XOR_2; break; 2730 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_XOR_4; break; 2731 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_XOR_8; break; 2732 } 2733 break; 2734 case ISD::ATOMIC_LOAD_NAND: 2735 switch (VT.SimpleTy) { 2736 default: llvm_unreachable("Unexpected value type for atomic!"); 2737 
case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_NAND_1; break; 2738 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_NAND_2; break; 2739 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_NAND_4; break; 2740 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_NAND_8; break; 2741 } 2742 break; 2743 } 2744 2745 return ExpandChainLibCall(LC, Node, false); 2746} 2747 2748void SelectionDAGLegalize::ExpandNode(SDNode *Node, 2749 SmallVectorImpl<SDValue> &Results) { 2750 DebugLoc dl = Node->getDebugLoc(); 2751 SDValue Tmp1, Tmp2, Tmp3, Tmp4; 2752 switch (Node->getOpcode()) { 2753 case ISD::CTPOP: 2754 case ISD::CTLZ: 2755 case ISD::CTTZ: 2756 Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl); 2757 Results.push_back(Tmp1); 2758 break; 2759 case ISD::BSWAP: 2760 Results.push_back(ExpandBSWAP(Node->getOperand(0), dl)); 2761 break; 2762 case ISD::FRAMEADDR: 2763 case ISD::RETURNADDR: 2764 case ISD::FRAME_TO_ARGS_OFFSET: 2765 Results.push_back(DAG.getConstant(0, Node->getValueType(0))); 2766 break; 2767 case ISD::FLT_ROUNDS_: 2768 Results.push_back(DAG.getConstant(1, Node->getValueType(0))); 2769 break; 2770 case ISD::EH_RETURN: 2771 case ISD::EH_LABEL: 2772 case ISD::PREFETCH: 2773 case ISD::VAEND: 2774 case ISD::EH_SJLJ_LONGJMP: 2775 case ISD::EH_SJLJ_DISPATCHSETUP: 2776 // If the target didn't expand these, there's nothing to do, so just 2777 // preserve the chain and be done. 2778 Results.push_back(Node->getOperand(0)); 2779 break; 2780 case ISD::EH_SJLJ_SETJMP: 2781 // If the target didn't expand this, just return 'zero' and preserve the 2782 // chain. 2783 Results.push_back(DAG.getConstant(0, MVT::i32)); 2784 Results.push_back(Node->getOperand(0)); 2785 break; 2786 case ISD::MEMBARRIER: { 2787 // If the target didn't lower this, lower it to '__sync_synchronize()' call 2788 TargetLowering::ArgListTy Args; 2789 std::pair<SDValue, SDValue> CallResult = 2790 TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()), 2791 false, false, false, false, 0, CallingConv::C, 2792 /*isTailCall=*/false, 2793 /*isReturnValueUsed=*/true, 2794 DAG.getExternalSymbol("__sync_synchronize", 2795 TLI.getPointerTy()), 2796 Args, DAG, dl); 2797 Results.push_back(CallResult.second); 2798 break; 2799 } 2800 // By default, atomic intrinsics are marked Legal and lowered. Targets 2801 // which don't support them directly, however, may want libcalls, in which 2802 // case they mark them Expand, and we get here. 
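  // For example, an i32 ATOMIC_LOAD_ADD becomes a call to
  // __sync_fetch_and_add_4, issued through ExpandChainLibCall so the chain
  // is threaded through the call.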
2803 case ISD::ATOMIC_SWAP: 2804 case ISD::ATOMIC_LOAD_ADD: 2805 case ISD::ATOMIC_LOAD_SUB: 2806 case ISD::ATOMIC_LOAD_AND: 2807 case ISD::ATOMIC_LOAD_OR: 2808 case ISD::ATOMIC_LOAD_XOR: 2809 case ISD::ATOMIC_LOAD_NAND: 2810 case ISD::ATOMIC_LOAD_MIN: 2811 case ISD::ATOMIC_LOAD_MAX: 2812 case ISD::ATOMIC_LOAD_UMIN: 2813 case ISD::ATOMIC_LOAD_UMAX: 2814 case ISD::ATOMIC_CMP_SWAP: { 2815 std::pair<SDValue, SDValue> Tmp = ExpandAtomic(Node); 2816 Results.push_back(Tmp.first); 2817 Results.push_back(Tmp.second); 2818 break; 2819 } 2820 case ISD::DYNAMIC_STACKALLOC: 2821 ExpandDYNAMIC_STACKALLOC(Node, Results); 2822 break; 2823 case ISD::MERGE_VALUES: 2824 for (unsigned i = 0; i < Node->getNumValues(); i++) 2825 Results.push_back(Node->getOperand(i)); 2826 break; 2827 case ISD::UNDEF: { 2828 EVT VT = Node->getValueType(0); 2829 if (VT.isInteger()) 2830 Results.push_back(DAG.getConstant(0, VT)); 2831 else { 2832 assert(VT.isFloatingPoint() && "Unknown value type!"); 2833 Results.push_back(DAG.getConstantFP(0, VT)); 2834 } 2835 break; 2836 } 2837 case ISD::TRAP: { 2838 // If this operation is not supported, lower it to 'abort()' call 2839 TargetLowering::ArgListTy Args; 2840 std::pair<SDValue, SDValue> CallResult = 2841 TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()), 2842 false, false, false, false, 0, CallingConv::C, 2843 /*isTailCall=*/false, 2844 /*isReturnValueUsed=*/true, 2845 DAG.getExternalSymbol("abort", TLI.getPointerTy()), 2846 Args, DAG, dl); 2847 Results.push_back(CallResult.second); 2848 break; 2849 } 2850 case ISD::FP_ROUND: 2851 case ISD::BITCAST: 2852 Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0), 2853 Node->getValueType(0), dl); 2854 Results.push_back(Tmp1); 2855 break; 2856 case ISD::FP_EXTEND: 2857 Tmp1 = EmitStackConvert(Node->getOperand(0), 2858 Node->getOperand(0).getValueType(), 2859 Node->getValueType(0), dl); 2860 Results.push_back(Tmp1); 2861 break; 2862 case ISD::SIGN_EXTEND_INREG: { 2863 // NOTE: we could fall back on load/store here too for targets without 2864 // SAR. However, it is doubtful that any exist. 2865 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 2866 EVT VT = Node->getValueType(0); 2867 EVT ShiftAmountTy = TLI.getShiftAmountTy(VT); 2868 if (VT.isVector()) 2869 ShiftAmountTy = VT; 2870 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 2871 ExtraVT.getScalarType().getSizeInBits(); 2872 SDValue ShiftCst = DAG.getConstant(BitsDiff, ShiftAmountTy); 2873 Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0), 2874 Node->getOperand(0), ShiftCst); 2875 Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst); 2876 Results.push_back(Tmp1); 2877 break; 2878 } 2879 case ISD::FP_ROUND_INREG: { 2880 // The only way we can lower this is to turn it into a TRUNCSTORE, 2881 // EXTLOAD pair, targetting a temporary location (a stack slot). 2882 2883 // NOTE: there is a choice here between constantly creating new stack 2884 // slots and always reusing the same one. We currently always create 2885 // new ones, as reuse may inhibit scheduling. 
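    // For example, an FP_ROUND_INREG of an f64 value to f32 truncstores the
    // value to the slot as f32 and immediately EXTLOADs it back as f64,
    // leaving a value with f32 precision in an f64.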
2886 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 2887 Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT, 2888 Node->getValueType(0), dl); 2889 Results.push_back(Tmp1); 2890 break; 2891 } 2892 case ISD::SINT_TO_FP: 2893 case ISD::UINT_TO_FP: 2894 Tmp1 = ExpandLegalINT_TO_FP(Node->getOpcode() == ISD::SINT_TO_FP, 2895 Node->getOperand(0), Node->getValueType(0), dl); 2896 Results.push_back(Tmp1); 2897 break; 2898 case ISD::FP_TO_UINT: { 2899 SDValue True, False; 2900 EVT VT = Node->getOperand(0).getValueType(); 2901 EVT NVT = Node->getValueType(0); 2902 APFloat apf(APInt::getNullValue(VT.getSizeInBits())); 2903 APInt x = APInt::getSignBit(NVT.getSizeInBits()); 2904 (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven); 2905 Tmp1 = DAG.getConstantFP(apf, VT); 2906 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), 2907 Node->getOperand(0), 2908 Tmp1, ISD::SETLT); 2909 True = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, Node->getOperand(0)); 2910 False = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, 2911 DAG.getNode(ISD::FSUB, dl, VT, 2912 Node->getOperand(0), Tmp1)); 2913 False = DAG.getNode(ISD::XOR, dl, NVT, False, 2914 DAG.getConstant(x, NVT)); 2915 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, True, False); 2916 Results.push_back(Tmp1); 2917 break; 2918 } 2919 case ISD::VAARG: { 2920 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2921 EVT VT = Node->getValueType(0); 2922 Tmp1 = Node->getOperand(0); 2923 Tmp2 = Node->getOperand(1); 2924 unsigned Align = Node->getConstantOperandVal(3); 2925 2926 SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, 2927 MachinePointerInfo(V), false, false, 0); 2928 SDValue VAList = VAListLoad; 2929 2930 if (Align > TLI.getMinStackArgumentAlignment()) { 2931 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); 2932 2933 VAList = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList, 2934 DAG.getConstant(Align - 1, 2935 TLI.getPointerTy())); 2936 2937 VAList = DAG.getNode(ISD::AND, dl, TLI.getPointerTy(), VAList, 2938 DAG.getConstant(-(int64_t)Align, 2939 TLI.getPointerTy())); 2940 } 2941 2942 // Increment the pointer, VAList, to the next vaarg 2943 Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList, 2944 DAG.getConstant(TLI.getTargetData()-> 2945 getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())), 2946 TLI.getPointerTy())); 2947 // Store the incremented VAList to the legalized pointer 2948 Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2, 2949 MachinePointerInfo(V), false, false, 0); 2950 // Load the actual argument out of the pointer VAList 2951 Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(), 2952 false, false, 0)); 2953 Results.push_back(Results[0].getValue(1)); 2954 break; 2955 } 2956 case ISD::VACOPY: { 2957 // This defaults to loading a pointer from the input and storing it to the 2958 // output, returning the chain. 
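    // Note that this simple expansion only works for targets whose va_list is
    // a single pointer-sized value; anything richer needs custom lowering.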
2959 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 2960 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 2961 Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0), 2962 Node->getOperand(2), MachinePointerInfo(VS), 2963 false, false, 0); 2964 Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), 2965 MachinePointerInfo(VD), false, false, 0); 2966 Results.push_back(Tmp1); 2967 break; 2968 } 2969 case ISD::EXTRACT_VECTOR_ELT: 2970 if (Node->getOperand(0).getValueType().getVectorNumElements() == 1) 2971 // This must be an access of the only element. Return it. 2972 Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), 2973 Node->getOperand(0)); 2974 else 2975 Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0)); 2976 Results.push_back(Tmp1); 2977 break; 2978 case ISD::EXTRACT_SUBVECTOR: 2979 Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0))); 2980 break; 2981 case ISD::INSERT_SUBVECTOR: 2982 Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0))); 2983 break; 2984 case ISD::CONCAT_VECTORS: { 2985 Results.push_back(ExpandVectorBuildThroughStack(Node)); 2986 break; 2987 } 2988 case ISD::SCALAR_TO_VECTOR: 2989 Results.push_back(ExpandSCALAR_TO_VECTOR(Node)); 2990 break; 2991 case ISD::INSERT_VECTOR_ELT: 2992 Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0), 2993 Node->getOperand(1), 2994 Node->getOperand(2), dl)); 2995 break; 2996 case ISD::VECTOR_SHUFFLE: { 2997 SmallVector<int, 8> Mask; 2998 cast<ShuffleVectorSDNode>(Node)->getMask(Mask); 2999 3000 EVT VT = Node->getValueType(0); 3001 EVT EltVT = VT.getVectorElementType(); 3002 if (getTypeAction(EltVT) == Promote) 3003 EltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT); 3004 unsigned NumElems = VT.getVectorNumElements(); 3005 SmallVector<SDValue, 8> Ops; 3006 for (unsigned i = 0; i != NumElems; ++i) { 3007 if (Mask[i] < 0) { 3008 Ops.push_back(DAG.getUNDEF(EltVT)); 3009 continue; 3010 } 3011 unsigned Idx = Mask[i]; 3012 if (Idx < NumElems) 3013 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 3014 Node->getOperand(0), 3015 DAG.getIntPtrConstant(Idx))); 3016 else 3017 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 3018 Node->getOperand(1), 3019 DAG.getIntPtrConstant(Idx - NumElems))); 3020 } 3021 Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size()); 3022 Results.push_back(Tmp1); 3023 break; 3024 } 3025 case ISD::EXTRACT_ELEMENT: { 3026 EVT OpTy = Node->getOperand(0).getValueType(); 3027 if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) { 3028 // 1 -> Hi 3029 Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0), 3030 DAG.getConstant(OpTy.getSizeInBits()/2, 3031 TLI.getShiftAmountTy(Node->getOperand(0).getValueType()))); 3032 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1); 3033 } else { 3034 // 0 -> Lo 3035 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), 3036 Node->getOperand(0)); 3037 } 3038 Results.push_back(Tmp1); 3039 break; 3040 } 3041 case ISD::STACKSAVE: 3042 // Expand to CopyFromReg if the target set 3043 // StackPointerRegisterToSaveRestore. 
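    // If no such register was set, there is nothing meaningful to copy from:
    // the fallback below returns UNDEF for the saved value and simply forwards
    // the chain (STACKRESTORE below does the analogous thing with CopyToReg).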
3044 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) { 3045 Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP, 3046 Node->getValueType(0))); 3047 Results.push_back(Results[0].getValue(1)); 3048 } else { 3049 Results.push_back(DAG.getUNDEF(Node->getValueType(0))); 3050 Results.push_back(Node->getOperand(0)); 3051 } 3052 break; 3053 case ISD::STACKRESTORE: 3054 // Expand to CopyToReg if the target set 3055 // StackPointerRegisterToSaveRestore. 3056 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) { 3057 Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP, 3058 Node->getOperand(1))); 3059 } else { 3060 Results.push_back(Node->getOperand(0)); 3061 } 3062 break; 3063 case ISD::FCOPYSIGN: 3064 Results.push_back(ExpandFCOPYSIGN(Node)); 3065 break; 3066 case ISD::FNEG: 3067 // Expand Y = FNEG(X) -> Y = SUB -0.0, X 3068 Tmp1 = DAG.getConstantFP(-0.0, Node->getValueType(0)); 3069 Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1, 3070 Node->getOperand(0)); 3071 Results.push_back(Tmp1); 3072 break; 3073 case ISD::FABS: { 3074 // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X). 3075 EVT VT = Node->getValueType(0); 3076 Tmp1 = Node->getOperand(0); 3077 Tmp2 = DAG.getConstantFP(0.0, VT); 3078 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(Tmp1.getValueType()), 3079 Tmp1, Tmp2, ISD::SETUGT); 3080 Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1); 3081 Tmp1 = DAG.getNode(ISD::SELECT, dl, VT, Tmp2, Tmp1, Tmp3); 3082 Results.push_back(Tmp1); 3083 break; 3084 } 3085 case ISD::FSQRT: 3086 Results.push_back(ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64, 3087 RTLIB::SQRT_F80, RTLIB::SQRT_PPCF128)); 3088 break; 3089 case ISD::FSIN: 3090 Results.push_back(ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64, 3091 RTLIB::SIN_F80, RTLIB::SIN_PPCF128)); 3092 break; 3093 case ISD::FCOS: 3094 Results.push_back(ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64, 3095 RTLIB::COS_F80, RTLIB::COS_PPCF128)); 3096 break; 3097 case ISD::FLOG: 3098 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64, 3099 RTLIB::LOG_F80, RTLIB::LOG_PPCF128)); 3100 break; 3101 case ISD::FLOG2: 3102 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64, 3103 RTLIB::LOG2_F80, RTLIB::LOG2_PPCF128)); 3104 break; 3105 case ISD::FLOG10: 3106 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64, 3107 RTLIB::LOG10_F80, RTLIB::LOG10_PPCF128)); 3108 break; 3109 case ISD::FEXP: 3110 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64, 3111 RTLIB::EXP_F80, RTLIB::EXP_PPCF128)); 3112 break; 3113 case ISD::FEXP2: 3114 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64, 3115 RTLIB::EXP2_F80, RTLIB::EXP2_PPCF128)); 3116 break; 3117 case ISD::FTRUNC: 3118 Results.push_back(ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64, 3119 RTLIB::TRUNC_F80, RTLIB::TRUNC_PPCF128)); 3120 break; 3121 case ISD::FFLOOR: 3122 Results.push_back(ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64, 3123 RTLIB::FLOOR_F80, RTLIB::FLOOR_PPCF128)); 3124 break; 3125 case ISD::FCEIL: 3126 Results.push_back(ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64, 3127 RTLIB::CEIL_F80, RTLIB::CEIL_PPCF128)); 3128 break; 3129 case ISD::FRINT: 3130 Results.push_back(ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64, 3131 RTLIB::RINT_F80, RTLIB::RINT_PPCF128)); 3132 break; 3133 case ISD::FNEARBYINT: 3134 Results.push_back(ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32, 3135 RTLIB::NEARBYINT_F64, 3136 
                                      RTLIB::NEARBYINT_F80,
                                      RTLIB::NEARBYINT_PPCF128));
    break;
  case ISD::FPOWI:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64,
                                      RTLIB::POWI_F80, RTLIB::POWI_PPCF128));
    break;
  case ISD::FPOW:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64,
                                      RTLIB::POW_F80, RTLIB::POW_PPCF128));
    break;
  case ISD::FDIV:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64,
                                      RTLIB::DIV_F80, RTLIB::DIV_PPCF128));
    break;
  case ISD::FREM:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64,
                                      RTLIB::REM_F80, RTLIB::REM_PPCF128));
    break;
  case ISD::FP16_TO_FP32:
    Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false));
    break;
  case ISD::FP32_TO_FP16:
    Results.push_back(ExpandLibCall(RTLIB::FPROUND_F32_F16, Node, false));
    break;
  case ISD::ConstantFP: {
    ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
    // Check to see if this FP immediate is already legal.
    // If this is a legal constant, turn it into a TargetConstantFP node.
    if (TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0)))
      Results.push_back(SDValue(Node, 0));
    else
      Results.push_back(ExpandConstantFP(CFP, true, DAG, TLI));
    break;
  }
  case ISD::EHSELECTION: {
    unsigned Reg = TLI.getExceptionSelectorRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(1), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::EXCEPTIONADDR: {
    unsigned Reg = TLI.getExceptionAddressRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::SUB: {
    EVT VT = Node->getValueType(0);
    assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
           TLI.isOperationLegalOrCustom(ISD::XOR, VT) &&
           "Don't know how to expand this subtraction!");
    // Expand a - b into a + (~b + 1), i.e. add the two's complement of b.
    Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1),
               DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT));
    Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, VT));
    Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1));
    break;
  }
  case ISD::UREM:
  case ISD::SREM: {
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    bool isSigned = Node->getOpcode() == ISD::SREM;
    unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
    unsigned DivRemOpc = isSigned ?
ISD::SDIVREM : ISD::UDIVREM; 3205 Tmp2 = Node->getOperand(0); 3206 Tmp3 = Node->getOperand(1); 3207 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT)) { 3208 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1); 3209 } else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) { 3210 // X % Y -> X-X/Y*Y 3211 Tmp1 = DAG.getNode(DivOpc, dl, VT, Tmp2, Tmp3); 3212 Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Tmp3); 3213 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp1); 3214 } else if (isSigned) { 3215 Tmp1 = ExpandDivRemLibCall(Node, true, false); 3216 if (!Tmp1.getNode()) 3217 Tmp1 = ExpandIntLibCall(Node, true, 3218 RTLIB::SREM_I8, 3219 RTLIB::SREM_I16, RTLIB::SREM_I32, 3220 RTLIB::SREM_I64, RTLIB::SREM_I128); 3221 } else { 3222 Tmp1 = ExpandDivRemLibCall(Node, false, false); 3223 if (!Tmp1.getNode()) 3224 Tmp1 = ExpandIntLibCall(Node, false, 3225 RTLIB::UREM_I8, 3226 RTLIB::UREM_I16, RTLIB::UREM_I32, 3227 RTLIB::UREM_I64, RTLIB::UREM_I128); 3228 } 3229 Results.push_back(Tmp1); 3230 break; 3231 } 3232 case ISD::UDIV: 3233 case ISD::SDIV: { 3234 bool isSigned = Node->getOpcode() == ISD::SDIV; 3235 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM; 3236 EVT VT = Node->getValueType(0); 3237 SDVTList VTs = DAG.getVTList(VT, VT); 3238 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT)) 3239 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0), 3240 Node->getOperand(1)); 3241 else if (isSigned) { 3242 Tmp1 = ExpandDivRemLibCall(Node, true, true); 3243 if (!Tmp1.getNode()) { 3244 Tmp1 = ExpandIntLibCall(Node, true, 3245 RTLIB::SDIV_I8, 3246 RTLIB::SDIV_I16, RTLIB::SDIV_I32, 3247 RTLIB::SDIV_I64, RTLIB::SDIV_I128); 3248 } 3249 } else { 3250 Tmp1 = ExpandDivRemLibCall(Node, false, true); 3251 if (!Tmp1.getNode()) { 3252 Tmp1 = ExpandIntLibCall(Node, false, 3253 RTLIB::UDIV_I8, 3254 RTLIB::UDIV_I16, RTLIB::UDIV_I32, 3255 RTLIB::UDIV_I64, RTLIB::UDIV_I128); 3256 } 3257 } 3258 Results.push_back(Tmp1); 3259 break; 3260 } 3261 case ISD::MULHU: 3262 case ISD::MULHS: { 3263 unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI : 3264 ISD::SMUL_LOHI; 3265 EVT VT = Node->getValueType(0); 3266 SDVTList VTs = DAG.getVTList(VT, VT); 3267 assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) && 3268 "If this wasn't legal, it shouldn't have been created!"); 3269 Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0), 3270 Node->getOperand(1)); 3271 Results.push_back(Tmp1.getValue(1)); 3272 break; 3273 } 3274 case ISD::MUL: { 3275 EVT VT = Node->getValueType(0); 3276 SDVTList VTs = DAG.getVTList(VT, VT); 3277 // See if multiply or divide can be lowered using two-result operations. 3278 // We just need the low half of the multiply; try both the signed 3279 // and unsigned forms. If the target supports both SMUL_LOHI and 3280 // UMUL_LOHI, form a preference by checking which forms of plain 3281 // MULH it supports. 
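    // Whichever *MUL_LOHI form is chosen, only result 0 (the low half) is
    // used here and the high half is left dead; if neither form is available
    // we fall back to a MUL_I* libcall.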
3282 bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT); 3283 bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT); 3284 bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT); 3285 bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT); 3286 unsigned OpToUse = 0; 3287 if (HasSMUL_LOHI && !HasMULHS) { 3288 OpToUse = ISD::SMUL_LOHI; 3289 } else if (HasUMUL_LOHI && !HasMULHU) { 3290 OpToUse = ISD::UMUL_LOHI; 3291 } else if (HasSMUL_LOHI) { 3292 OpToUse = ISD::SMUL_LOHI; 3293 } else if (HasUMUL_LOHI) { 3294 OpToUse = ISD::UMUL_LOHI; 3295 } 3296 if (OpToUse) { 3297 Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0), 3298 Node->getOperand(1))); 3299 break; 3300 } 3301 Tmp1 = ExpandIntLibCall(Node, false, 3302 RTLIB::MUL_I8, 3303 RTLIB::MUL_I16, RTLIB::MUL_I32, 3304 RTLIB::MUL_I64, RTLIB::MUL_I128); 3305 Results.push_back(Tmp1); 3306 break; 3307 } 3308 case ISD::SADDO: 3309 case ISD::SSUBO: { 3310 SDValue LHS = Node->getOperand(0); 3311 SDValue RHS = Node->getOperand(1); 3312 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ? 3313 ISD::ADD : ISD::SUB, dl, LHS.getValueType(), 3314 LHS, RHS); 3315 Results.push_back(Sum); 3316 EVT OType = Node->getValueType(1); 3317 3318 SDValue Zero = DAG.getConstant(0, LHS.getValueType()); 3319 3320 // LHSSign -> LHS >= 0 3321 // RHSSign -> RHS >= 0 3322 // SumSign -> Sum >= 0 3323 // 3324 // Add: 3325 // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign) 3326 // Sub: 3327 // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign) 3328 // 3329 SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE); 3330 SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE); 3331 SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign, 3332 Node->getOpcode() == ISD::SADDO ? 3333 ISD::SETEQ : ISD::SETNE); 3334 3335 SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE); 3336 SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE); 3337 3338 SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE); 3339 Results.push_back(Cmp); 3340 break; 3341 } 3342 case ISD::UADDO: 3343 case ISD::USUBO: { 3344 SDValue LHS = Node->getOperand(0); 3345 SDValue RHS = Node->getOperand(1); 3346 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::UADDO ? 3347 ISD::ADD : ISD::SUB, dl, LHS.getValueType(), 3348 LHS, RHS); 3349 Results.push_back(Sum); 3350 Results.push_back(DAG.getSetCC(dl, Node->getValueType(1), Sum, LHS, 3351 Node->getOpcode () == ISD::UADDO ? 
3352 ISD::SETULT : ISD::SETUGT)); 3353 break; 3354 } 3355 case ISD::UMULO: 3356 case ISD::SMULO: { 3357 EVT VT = Node->getValueType(0); 3358 SDValue LHS = Node->getOperand(0); 3359 SDValue RHS = Node->getOperand(1); 3360 SDValue BottomHalf; 3361 SDValue TopHalf; 3362 static const unsigned Ops[2][3] = 3363 { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND }, 3364 { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }}; 3365 bool isSigned = Node->getOpcode() == ISD::SMULO; 3366 if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) { 3367 BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 3368 TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS); 3369 } else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) { 3370 BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS, 3371 RHS); 3372 TopHalf = BottomHalf.getValue(1); 3373 } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(), 3374 VT.getSizeInBits() * 2))) { 3375 EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2); 3376 LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS); 3377 RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS); 3378 Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS); 3379 BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1, 3380 DAG.getIntPtrConstant(0)); 3381 TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1, 3382 DAG.getIntPtrConstant(1)); 3383 } else { 3384 // We can fall back to a libcall with an illegal type for the MUL if we 3385 // have a libcall big enough. 3386 // Also, we can fall back to a division in some cases, but that's a big 3387 // performance hit in the general case. 3388 EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2); 3389 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 3390 if (WideVT == MVT::i16) 3391 LC = RTLIB::MUL_I16; 3392 else if (WideVT == MVT::i32) 3393 LC = RTLIB::MUL_I32; 3394 else if (WideVT == MVT::i64) 3395 LC = RTLIB::MUL_I64; 3396 else if (WideVT == MVT::i128) 3397 LC = RTLIB::MUL_I128; 3398 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!"); 3399 LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS); 3400 RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS); 3401 3402 SDValue Ret = ExpandLibCall(LC, Node, isSigned); 3403 BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Ret); 3404 TopHalf = DAG.getNode(ISD::SRL, dl, Ret.getValueType(), Ret, 3405 DAG.getConstant(VT.getSizeInBits(), TLI.getPointerTy())); 3406 TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, TopHalf); 3407 } 3408 if (isSigned) { 3409 Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, 3410 TLI.getShiftAmountTy(BottomHalf.getValueType())); 3411 Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1); 3412 TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1, 3413 ISD::SETNE); 3414 } else { 3415 TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, 3416 DAG.getConstant(0, VT), ISD::SETNE); 3417 } 3418 Results.push_back(BottomHalf); 3419 Results.push_back(TopHalf); 3420 break; 3421 } 3422 case ISD::BUILD_PAIR: { 3423 EVT PairTy = Node->getValueType(0); 3424 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0)); 3425 Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1)); 3426 Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2, 3427 DAG.getConstant(PairTy.getSizeInBits()/2, 3428 TLI.getShiftAmountTy(PairTy))); 3429 Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2)); 3430 break; 3431 } 3432 case ISD::SELECT: 3433 Tmp1 = Node->getOperand(0); 3434 Tmp2 = 
Node->getOperand(1); 3435 Tmp3 = Node->getOperand(2); 3436 if (Tmp1.getOpcode() == ISD::SETCC) { 3437 Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1), 3438 Tmp2, Tmp3, 3439 cast<CondCodeSDNode>(Tmp1.getOperand(2))->get()); 3440 } else { 3441 Tmp1 = DAG.getSelectCC(dl, Tmp1, 3442 DAG.getConstant(0, Tmp1.getValueType()), 3443 Tmp2, Tmp3, ISD::SETNE); 3444 } 3445 Results.push_back(Tmp1); 3446 break; 3447 case ISD::BR_JT: { 3448 SDValue Chain = Node->getOperand(0); 3449 SDValue Table = Node->getOperand(1); 3450 SDValue Index = Node->getOperand(2); 3451 3452 EVT PTy = TLI.getPointerTy(); 3453 3454 const TargetData &TD = *TLI.getTargetData(); 3455 unsigned EntrySize = 3456 DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD); 3457 3458 Index = DAG.getNode(ISD::MUL, dl, PTy, 3459 Index, DAG.getConstant(EntrySize, PTy)); 3460 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 3461 3462 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8); 3463 SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr, 3464 MachinePointerInfo::getJumpTable(), MemVT, 3465 false, false, 0); 3466 Addr = LD; 3467 if (TM.getRelocationModel() == Reloc::PIC_) { 3468 // For PIC, the sequence is: 3469 // BRIND(load(Jumptable + index) + RelocBase) 3470 // RelocBase can be JumpTable, GOT or some sort of global base. 3471 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, 3472 TLI.getPICJumpTableRelocBase(Table, DAG)); 3473 } 3474 Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr); 3475 Results.push_back(Tmp1); 3476 break; 3477 } 3478 case ISD::BRCOND: 3479 // Expand brcond's setcc into its constituent parts and create a BR_CC 3480 // Node. 3481 Tmp1 = Node->getOperand(0); 3482 Tmp2 = Node->getOperand(1); 3483 if (Tmp2.getOpcode() == ISD::SETCC) { 3484 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, 3485 Tmp1, Tmp2.getOperand(2), 3486 Tmp2.getOperand(0), Tmp2.getOperand(1), 3487 Node->getOperand(2)); 3488 } else { 3489 Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1, 3490 DAG.getCondCode(ISD::SETNE), Tmp2, 3491 DAG.getConstant(0, Tmp2.getValueType()), 3492 Node->getOperand(2)); 3493 } 3494 Results.push_back(Tmp1); 3495 break; 3496 case ISD::SETCC: { 3497 Tmp1 = Node->getOperand(0); 3498 Tmp2 = Node->getOperand(1); 3499 Tmp3 = Node->getOperand(2); 3500 LegalizeSetCCCondCode(Node->getValueType(0), Tmp1, Tmp2, Tmp3, dl); 3501 3502 // If we expanded the SETCC into an AND/OR, return the new node 3503 if (Tmp2.getNode() == 0) { 3504 Results.push_back(Tmp1); 3505 break; 3506 } 3507 3508 // Otherwise, SETCC for the given comparison type must be completely 3509 // illegal; expand it into a SELECT_CC. 
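    // i.e. (setcc lhs, rhs, cc) becomes (select_cc lhs, rhs, 1, 0, cc),
    // producing 1 when the comparison holds and 0 otherwise.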
3510 EVT VT = Node->getValueType(0); 3511 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2, 3512 DAG.getConstant(1, VT), DAG.getConstant(0, VT), Tmp3); 3513 Results.push_back(Tmp1); 3514 break; 3515 } 3516 case ISD::SELECT_CC: { 3517 Tmp1 = Node->getOperand(0); // LHS 3518 Tmp2 = Node->getOperand(1); // RHS 3519 Tmp3 = Node->getOperand(2); // True 3520 Tmp4 = Node->getOperand(3); // False 3521 SDValue CC = Node->getOperand(4); 3522 3523 LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp1.getValueType()), 3524 Tmp1, Tmp2, CC, dl); 3525 3526 assert(!Tmp2.getNode() && "Can't legalize SELECT_CC with legal condition!"); 3527 Tmp2 = DAG.getConstant(0, Tmp1.getValueType()); 3528 CC = DAG.getCondCode(ISD::SETNE); 3529 Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, Tmp2, 3530 Tmp3, Tmp4, CC); 3531 Results.push_back(Tmp1); 3532 break; 3533 } 3534 case ISD::BR_CC: { 3535 Tmp1 = Node->getOperand(0); // Chain 3536 Tmp2 = Node->getOperand(2); // LHS 3537 Tmp3 = Node->getOperand(3); // RHS 3538 Tmp4 = Node->getOperand(1); // CC 3539 3540 LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp2.getValueType()), 3541 Tmp2, Tmp3, Tmp4, dl); 3542 LastCALLSEQ_END = DAG.getEntryNode(); 3543 3544 assert(!Tmp3.getNode() && "Can't legalize BR_CC with legal condition!"); 3545 Tmp3 = DAG.getConstant(0, Tmp2.getValueType()); 3546 Tmp4 = DAG.getCondCode(ISD::SETNE); 3547 Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4, Tmp2, 3548 Tmp3, Node->getOperand(4)); 3549 Results.push_back(Tmp1); 3550 break; 3551 } 3552 case ISD::GLOBAL_OFFSET_TABLE: 3553 case ISD::GlobalAddress: 3554 case ISD::GlobalTLSAddress: 3555 case ISD::ExternalSymbol: 3556 case ISD::ConstantPool: 3557 case ISD::JumpTable: 3558 case ISD::INTRINSIC_W_CHAIN: 3559 case ISD::INTRINSIC_WO_CHAIN: 3560 case ISD::INTRINSIC_VOID: 3561 // FIXME: Custom lowering for these operations shouldn't return null! 3562 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) 3563 Results.push_back(SDValue(Node, i)); 3564 break; 3565 } 3566} 3567void SelectionDAGLegalize::PromoteNode(SDNode *Node, 3568 SmallVectorImpl<SDValue> &Results) { 3569 EVT OVT = Node->getValueType(0); 3570 if (Node->getOpcode() == ISD::UINT_TO_FP || 3571 Node->getOpcode() == ISD::SINT_TO_FP || 3572 Node->getOpcode() == ISD::SETCC) { 3573 OVT = Node->getOperand(0).getValueType(); 3574 } 3575 EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT); 3576 DebugLoc dl = Node->getDebugLoc(); 3577 SDValue Tmp1, Tmp2, Tmp3; 3578 switch (Node->getOpcode()) { 3579 case ISD::CTTZ: 3580 case ISD::CTLZ: 3581 case ISD::CTPOP: 3582 // Zero extend the argument. 3583 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0)); 3584 // Perform the larger operation. 
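    // The result computed in the wider type is adjusted back to the original
    // width below: a CTTZ that returns sizeinbits(NVT) (i.e. the input was
    // zero) is clamped to sizeinbits(OVT), and CTLZ subtracts the extra
    // leading zero bits introduced by the zero extension.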
3585 Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1); 3586 if (Node->getOpcode() == ISD::CTTZ) { 3587 //if Tmp1 == sizeinbits(NVT) then Tmp1 = sizeinbits(Old VT) 3588 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT), 3589 Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT), 3590 ISD::SETEQ); 3591 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, 3592 DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1); 3593 } else if (Node->getOpcode() == ISD::CTLZ) { 3594 // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT)) 3595 Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1, 3596 DAG.getConstant(NVT.getSizeInBits() - 3597 OVT.getSizeInBits(), NVT)); 3598 } 3599 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1)); 3600 break; 3601 case ISD::BSWAP: { 3602 unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits(); 3603 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0)); 3604 Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1); 3605 Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1, 3606 DAG.getConstant(DiffBits, TLI.getShiftAmountTy(NVT))); 3607 Results.push_back(Tmp1); 3608 break; 3609 } 3610 case ISD::FP_TO_UINT: 3611 case ISD::FP_TO_SINT: 3612 Tmp1 = PromoteLegalFP_TO_INT(Node->getOperand(0), Node->getValueType(0), 3613 Node->getOpcode() == ISD::FP_TO_SINT, dl); 3614 Results.push_back(Tmp1); 3615 break; 3616 case ISD::UINT_TO_FP: 3617 case ISD::SINT_TO_FP: 3618 Tmp1 = PromoteLegalINT_TO_FP(Node->getOperand(0), Node->getValueType(0), 3619 Node->getOpcode() == ISD::SINT_TO_FP, dl); 3620 Results.push_back(Tmp1); 3621 break; 3622 case ISD::AND: 3623 case ISD::OR: 3624 case ISD::XOR: { 3625 unsigned ExtOp, TruncOp; 3626 if (OVT.isVector()) { 3627 ExtOp = ISD::BITCAST; 3628 TruncOp = ISD::BITCAST; 3629 } else { 3630 assert(OVT.isInteger() && "Cannot promote logic operation"); 3631 ExtOp = ISD::ANY_EXTEND; 3632 TruncOp = ISD::TRUNCATE; 3633 } 3634 // Promote each of the values to the new type. 3635 Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0)); 3636 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1)); 3637 // Perform the larger operation, then convert back 3638 Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2); 3639 Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1)); 3640 break; 3641 } 3642 case ISD::SELECT: { 3643 unsigned ExtOp, TruncOp; 3644 if (Node->getValueType(0).isVector()) { 3645 ExtOp = ISD::BITCAST; 3646 TruncOp = ISD::BITCAST; 3647 } else if (Node->getValueType(0).isInteger()) { 3648 ExtOp = ISD::ANY_EXTEND; 3649 TruncOp = ISD::TRUNCATE; 3650 } else { 3651 ExtOp = ISD::FP_EXTEND; 3652 TruncOp = ISD::FP_ROUND; 3653 } 3654 Tmp1 = Node->getOperand(0); 3655 // Promote each of the values to the new type. 3656 Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1)); 3657 Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2)); 3658 // Perform the larger operation, then round down. 3659 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp1, Tmp2, Tmp3); 3660 if (TruncOp != ISD::FP_ROUND) 3661 Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1); 3662 else 3663 Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1, 3664 DAG.getIntPtrConstant(0)); 3665 Results.push_back(Tmp1); 3666 break; 3667 } 3668 case ISD::VECTOR_SHUFFLE: { 3669 SmallVector<int, 8> Mask; 3670 cast<ShuffleVectorSDNode>(Node)->getMask(Mask); 3671 3672 // Cast the two input vectors. 3673 Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0)); 3674 Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1)); 3675 3676 // Convert the shuffle mask to the right # elements. 
    Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SETCC: {
    unsigned ExtOp = ISD::FP_EXTEND;
    if (NVT.isInteger()) {
      ISD::CondCode CCCode =
        cast<CondCodeSDNode>(Node->getOperand(2))->get();
      ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    }
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
                                  Tmp1, Tmp2, Node->getOperand(2)));
    break;
  }
  }
}

// SelectionDAG::Legalize - This is the entry point for the file.
//
void SelectionDAG::Legalize(CodeGenOpt::Level OptLevel) {
  // Construct a legalizer and legalize this DAG in place.
  SelectionDAGLegalize(*this, OptLevel).LegalizeDAG();
}
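// Usage sketch (an assumption about the surrounding pipeline, not code from
// this file): the instruction selector is expected to invoke this once the
// DAG's types are legal, roughly as
//   CurDAG->Legalize(OptLevel);
// after which every remaining node should be Legal for the target and
// directly matchable by its instruction selection patterns.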