LegalizeDAG.cpp revision 2d6dcb34b7f39682f3eed08180631189fb4b6636
1//===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the SelectionDAG::Legalize method. 11// 12//===----------------------------------------------------------------------===// 13 14#include "llvm/Analysis/DebugInfo.h" 15#include "llvm/CodeGen/Analysis.h" 16#include "llvm/CodeGen/MachineFunction.h" 17#include "llvm/CodeGen/MachineJumpTableInfo.h" 18#include "llvm/CodeGen/SelectionDAG.h" 19#include "llvm/Target/TargetFrameLowering.h" 20#include "llvm/Target/TargetLowering.h" 21#include "llvm/Target/TargetData.h" 22#include "llvm/Target/TargetMachine.h" 23#include "llvm/CallingConv.h" 24#include "llvm/Constants.h" 25#include "llvm/DerivedTypes.h" 26#include "llvm/LLVMContext.h" 27#include "llvm/Support/Debug.h" 28#include "llvm/Support/ErrorHandling.h" 29#include "llvm/Support/MathExtras.h" 30#include "llvm/Support/raw_ostream.h" 31#include "llvm/ADT/DenseMap.h" 32#include "llvm/ADT/SmallVector.h" 33#include "llvm/ADT/SmallPtrSet.h" 34using namespace llvm; 35 36//===----------------------------------------------------------------------===// 37/// SelectionDAGLegalize - This takes an arbitrary SelectionDAG as input and 38/// hacks on it until the target machine can handle it. This involves 39/// eliminating value sizes the machine cannot handle (promoting small sizes to 40/// large sizes or splitting up large values into small values) as well as 41/// eliminating operations the machine cannot handle. 42/// 43/// This code also does a small amount of optimization and recognition of idioms 44/// as part of its processing. For example, if a target does not support a 45/// 'setcc' instruction efficiently, but does support 'brcc' instruction, this 46/// will attempt merge setcc and brc instructions into brcc's. 47/// 48namespace { 49class SelectionDAGLegalize { 50 const TargetMachine &TM; 51 const TargetLowering &TLI; 52 SelectionDAG &DAG; 53 54 // Libcall insertion helpers. 55 56 /// LastCALLSEQ - This keeps track of the CALLSEQ_END node that has been 57 /// legalized. We use this to ensure that calls are properly serialized 58 /// against each other, including inserted libcalls. 59 SmallVector<SDValue, 8> LastCALLSEQ; 60 61 enum LegalizeAction { 62 Legal, // The target natively supports this operation. 63 Promote, // This operation should be executed in a larger type. 64 Expand // Try to expand this to other ops, otherwise use a libcall. 65 }; 66 67 /// ValueTypeActions - This is a bitvector that contains two bits for each 68 /// value type, where the two bits correspond to the LegalizeAction enum. 69 /// This can be queried with "getTypeAction(VT)". 70 TargetLowering::ValueTypeActionImpl ValueTypeActions; 71 72 /// LegalizedNodes - For nodes that are of legal width, and that have more 73 /// than one use, this map indicates what regularized operand to use. This 74 /// allows us to avoid legalizing the same thing more than once. 75 DenseMap<SDValue, SDValue> LegalizedNodes; 76 77 void AddLegalizedOperand(SDValue From, SDValue To) { 78 LegalizedNodes.insert(std::make_pair(From, To)); 79 // If someone requests legalization of the new node, return itself. 80 if (From != To) 81 LegalizedNodes.insert(std::make_pair(To, To)); 82 83 // Transfer SDDbgValues. 
84 DAG.TransferDbgValues(From, To); 85 } 86 87public: 88 explicit SelectionDAGLegalize(SelectionDAG &DAG); 89 90 /// getTypeAction - Return how we should legalize values of this type, either 91 /// it is already legal or we need to expand it into multiple registers of 92 /// smaller integer type, or we need to promote it to a larger type. 93 LegalizeAction getTypeAction(EVT VT) const { 94 return (LegalizeAction)TLI.getTypeAction(*DAG.getContext(), VT); 95 } 96 97 /// isTypeLegal - Return true if this type is legal on this target. 98 /// 99 bool isTypeLegal(EVT VT) const { 100 return getTypeAction(VT) == Legal; 101 } 102 103 void LegalizeDAG(); 104 105private: 106 /// LegalizeOp - We know that the specified value has a legal type. 107 /// Recursively ensure that the operands have legal types, then return the 108 /// result. 109 SDValue LegalizeOp(SDValue O); 110 111 SDValue OptimizeFloatStore(StoreSDNode *ST); 112 113 /// PerformInsertVectorEltInMemory - Some target cannot handle a variable 114 /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it 115 /// is necessary to spill the vector being inserted into to memory, perform 116 /// the insert there, and then read the result back. 117 SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, 118 SDValue Idx, DebugLoc dl); 119 SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, 120 SDValue Idx, DebugLoc dl); 121 122 /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which 123 /// performs the same shuffe in terms of order or result bytes, but on a type 124 /// whose vector element type is narrower than the original shuffle type. 125 /// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3> 126 SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl, 127 SDValue N1, SDValue N2, 128 SmallVectorImpl<int> &Mask) const; 129 130 bool LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest, 131 SmallPtrSet<SDNode*, 32> &NodesLeadingTo); 132 133 void LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, 134 DebugLoc dl); 135 136 SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned); 137 SDValue ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, const SDValue *Ops, 138 unsigned NumOps, bool isSigned, DebugLoc dl); 139 140 std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC, 141 SDNode *Node, bool isSigned); 142 SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32, 143 RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80, 144 RTLIB::Libcall Call_PPCF128); 145 SDValue ExpandIntLibCall(SDNode *Node, bool isSigned, 146 RTLIB::Libcall Call_I8, 147 RTLIB::Libcall Call_I16, 148 RTLIB::Libcall Call_I32, 149 RTLIB::Libcall Call_I64, 150 RTLIB::Libcall Call_I128); 151 void ExpandDivRemLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results); 152 153 SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, DebugLoc dl); 154 SDValue ExpandBUILD_VECTOR(SDNode *Node); 155 SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node); 156 void ExpandDYNAMIC_STACKALLOC(SDNode *Node, 157 SmallVectorImpl<SDValue> &Results); 158 SDValue ExpandFCOPYSIGN(SDNode *Node); 159 SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT, 160 DebugLoc dl); 161 SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned, 162 DebugLoc dl); 163 SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned, 164 DebugLoc dl); 165 166 SDValue ExpandBSWAP(SDValue Op, DebugLoc dl); 167 SDValue ExpandBitCount(unsigned Opc, SDValue Op, DebugLoc dl); 168 
169 SDValue ExpandExtractFromVectorThroughStack(SDValue Op); 170 SDValue ExpandInsertToVectorThroughStack(SDValue Op); 171 SDValue ExpandVectorBuildThroughStack(SDNode* Node); 172 173 std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node); 174 175 void ExpandNode(SDNode *Node, SmallVectorImpl<SDValue> &Results); 176 void PromoteNode(SDNode *Node, SmallVectorImpl<SDValue> &Results); 177 178 SDValue getLastCALLSEQ() { return LastCALLSEQ.back(); } 179 void setLastCALLSEQ(const SDValue s) { LastCALLSEQ.back() = s; } 180 void pushLastCALLSEQ(SDValue s) { 181 LastCALLSEQ.push_back(s); 182 } 183 void popLastCALLSEQ() { 184 LastCALLSEQ.pop_back(); 185 } 186}; 187} 188 189/// ShuffleWithNarrowerEltType - Return a vector shuffle operation which 190/// performs the same shuffe in terms of order or result bytes, but on a type 191/// whose vector element type is narrower than the original shuffle type. 192/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3> 193SDValue 194SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl, 195 SDValue N1, SDValue N2, 196 SmallVectorImpl<int> &Mask) const { 197 unsigned NumMaskElts = VT.getVectorNumElements(); 198 unsigned NumDestElts = NVT.getVectorNumElements(); 199 unsigned NumEltsGrowth = NumDestElts / NumMaskElts; 200 201 assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!"); 202 203 if (NumEltsGrowth == 1) 204 return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]); 205 206 SmallVector<int, 8> NewMask; 207 for (unsigned i = 0; i != NumMaskElts; ++i) { 208 int Idx = Mask[i]; 209 for (unsigned j = 0; j != NumEltsGrowth; ++j) { 210 if (Idx < 0) 211 NewMask.push_back(-1); 212 else 213 NewMask.push_back(Idx * NumEltsGrowth + j); 214 } 215 } 216 assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?"); 217 assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?"); 218 return DAG.getVectorShuffle(NVT, dl, N1, N2, &NewMask[0]); 219} 220 221SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag) 222 : TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()), 223 DAG(dag), 224 ValueTypeActions(TLI.getValueTypeActions()) { 225 assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE && 226 "Too many value types for ValueTypeActions to hold!"); 227} 228 229void SelectionDAGLegalize::LegalizeDAG() { 230 pushLastCALLSEQ(DAG.getEntryNode()); 231 232 // The legalize process is inherently a bottom-up recursive process (users 233 // legalize their uses before themselves). Given infinite stack space, we 234 // could just start legalizing on the root and traverse the whole graph. In 235 // practice however, this causes us to run out of stack space on large basic 236 // blocks. To avoid this problem, compute an ordering of the nodes where each 237 // node is only legalized after all of its operands are legalized. 238 DAG.AssignTopologicalOrder(); 239 for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(), 240 E = prior(DAG.allnodes_end()); I != llvm::next(E); ++I) 241 LegalizeOp(SDValue(I, 0)); 242 243 // Finally, it's possible the root changed. Get the new root. 244 SDValue OldRoot = DAG.getRoot(); 245 assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?"); 246 DAG.setRoot(LegalizedNodes[OldRoot]); 247 248 LegalizedNodes.clear(); 249 250 // Remove dead nodes now. 251 DAG.RemoveDeadNodes(); 252} 253 254 255/// FindCallEndFromCallStart - Given a chained node that is part of a call 256/// sequence, find the CALLSEQ_END node that terminates the call sequence. 
257static SDNode *FindCallEndFromCallStart(SDNode *Node, int depth = 0) { 258 int next_depth = depth; 259 if (Node->getOpcode() == ISD::CALLSEQ_START) 260 next_depth = depth + 1; 261 if (Node->getOpcode() == ISD::CALLSEQ_END) { 262 assert(depth > 0 && "negative depth!"); 263 if (depth == 1) 264 return Node; 265 else 266 next_depth = depth - 1; 267 } 268 if (Node->use_empty()) 269 return 0; // No CallSeqEnd 270 271 // The chain is usually at the end. 272 SDValue TheChain(Node, Node->getNumValues()-1); 273 if (TheChain.getValueType() != MVT::Other) { 274 // Sometimes it's at the beginning. 275 TheChain = SDValue(Node, 0); 276 if (TheChain.getValueType() != MVT::Other) { 277 // Otherwise, hunt for it. 278 for (unsigned i = 1, e = Node->getNumValues(); i != e; ++i) 279 if (Node->getValueType(i) == MVT::Other) { 280 TheChain = SDValue(Node, i); 281 break; 282 } 283 284 // Otherwise, we walked into a node without a chain. 285 if (TheChain.getValueType() != MVT::Other) 286 return 0; 287 } 288 } 289 290 for (SDNode::use_iterator UI = Node->use_begin(), 291 E = Node->use_end(); UI != E; ++UI) { 292 293 // Make sure to only follow users of our token chain. 294 SDNode *User = *UI; 295 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) 296 if (User->getOperand(i) == TheChain) 297 if (SDNode *Result = FindCallEndFromCallStart(User, next_depth)) 298 return Result; 299 } 300 return 0; 301} 302 303/// FindCallStartFromCallEnd - Given a chained node that is part of a call 304/// sequence, find the CALLSEQ_START node that initiates the call sequence. 305static SDNode *FindCallStartFromCallEnd(SDNode *Node) { 306 int nested = 0; 307 assert(Node && "Didn't find callseq_start for a call??"); 308 while (Node->getOpcode() != ISD::CALLSEQ_START || nested) { 309 Node = Node->getOperand(0).getNode(); 310 assert(Node->getOperand(0).getValueType() == MVT::Other && 311 "Node doesn't have a token chain argument!"); 312 switch (Node->getOpcode()) { 313 default: 314 break; 315 case ISD::CALLSEQ_START: 316 if (!nested) 317 return Node; 318 Node = Node->getOperand(0).getNode(); 319 nested--; 320 break; 321 case ISD::CALLSEQ_END: 322 nested++; 323 break; 324 } 325 } 326 return (Node->getOpcode() == ISD::CALLSEQ_START) ? Node : 0; 327} 328 329/// LegalizeAllNodesNotLeadingTo - Recursively walk the uses of N, looking to 330/// see if any uses can reach Dest. If no dest operands can get to dest, 331/// legalize them, legalize ourself, and return false, otherwise, return true. 332/// 333/// Keep track of the nodes we fine that actually do lead to Dest in 334/// NodesLeadingTo. This avoids retraversing them exponential number of times. 335/// 336bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest, 337 SmallPtrSet<SDNode*, 32> &NodesLeadingTo) { 338 if (N == Dest) return true; // N certainly leads to Dest :) 339 340 // If we've already processed this node and it does lead to Dest, there is no 341 // need to reprocess it. 342 if (NodesLeadingTo.count(N)) return true; 343 344 // If the first result of this node has been already legalized, then it cannot 345 // reach N. 346 if (LegalizedNodes.count(SDValue(N, 0))) return false; 347 348 // Okay, this node has not already been legalized. Check and legalize all 349 // operands. If none lead to Dest, then we can legalize this node. 350 bool OperandsLeadToDest = false; 351 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) 352 OperandsLeadToDest |= // If an operand leads to Dest, so do we. 
353 LegalizeAllNodesNotLeadingTo(N->getOperand(i).getNode(), Dest, 354 NodesLeadingTo); 355 356 if (OperandsLeadToDest) { 357 NodesLeadingTo.insert(N); 358 return true; 359 } 360 361 // Okay, this node looks safe, legalize it and return false. 362 LegalizeOp(SDValue(N, 0)); 363 return false; 364} 365 366/// ExpandConstantFP - Expands the ConstantFP node to an integer constant or 367/// a load from the constant pool. 368static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP, 369 SelectionDAG &DAG, const TargetLowering &TLI) { 370 bool Extend = false; 371 DebugLoc dl = CFP->getDebugLoc(); 372 373 // If a FP immediate is precise when represented as a float and if the 374 // target can do an extending load from float to double, we put it into 375 // the constant pool as a float, even if it's is statically typed as a 376 // double. This shrinks FP constants and canonicalizes them for targets where 377 // an FP extending load is the same cost as a normal load (such as on the x87 378 // fp stack or PPC FP unit). 379 EVT VT = CFP->getValueType(0); 380 ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue()); 381 if (!UseCP) { 382 assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion"); 383 return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(), 384 (VT == MVT::f64) ? MVT::i64 : MVT::i32); 385 } 386 387 EVT OrigVT = VT; 388 EVT SVT = VT; 389 while (SVT != MVT::f32) { 390 SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1); 391 if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) && 392 // Only do this if the target has a native EXTLOAD instruction from 393 // smaller type. 394 TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) && 395 TLI.ShouldShrinkFPConstant(OrigVT)) { 396 const Type *SType = SVT.getTypeForEVT(*DAG.getContext()); 397 LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType)); 398 VT = SVT; 399 Extend = true; 400 } 401 } 402 403 SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy()); 404 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 405 if (Extend) 406 return DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT, 407 DAG.getEntryNode(), 408 CPIdx, MachinePointerInfo::getConstantPool(), 409 VT, false, false, Alignment); 410 return DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx, 411 MachinePointerInfo::getConstantPool(), false, false, 412 Alignment); 413} 414 415/// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores. 416static 417SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, 418 const TargetLowering &TLI) { 419 SDValue Chain = ST->getChain(); 420 SDValue Ptr = ST->getBasePtr(); 421 SDValue Val = ST->getValue(); 422 EVT VT = Val.getValueType(); 423 int Alignment = ST->getAlignment(); 424 DebugLoc dl = ST->getDebugLoc(); 425 if (ST->getMemoryVT().isFloatingPoint() || 426 ST->getMemoryVT().isVector()) { 427 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 428 if (TLI.isTypeLegal(intVT)) { 429 // Expand to a bitconvert of the value to the integer type of the 430 // same size, then a (misaligned) int store. 431 // FIXME: Does not handle truncating floating point stores! 432 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val); 433 return DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(), 434 ST->isVolatile(), ST->isNonTemporal(), Alignment); 435 } 436 // Do a (aligned) store to a stack slot, then copy from the stack slot 437 // to the final destination using (unaligned) integer loads and stores. 
438 EVT StoredVT = ST->getMemoryVT(); 439 EVT RegVT = 440 TLI.getRegisterType(*DAG.getContext(), 441 EVT::getIntegerVT(*DAG.getContext(), 442 StoredVT.getSizeInBits())); 443 unsigned StoredBytes = StoredVT.getSizeInBits() / 8; 444 unsigned RegBytes = RegVT.getSizeInBits() / 8; 445 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes; 446 447 // Make sure the stack slot is also aligned for the register type. 448 SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT); 449 450 // Perform the original store, only redirected to the stack slot. 451 SDValue Store = DAG.getTruncStore(Chain, dl, 452 Val, StackPtr, MachinePointerInfo(), 453 StoredVT, false, false, 0); 454 SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy()); 455 SmallVector<SDValue, 8> Stores; 456 unsigned Offset = 0; 457 458 // Do all but one copies using the full register width. 459 for (unsigned i = 1; i < NumRegs; i++) { 460 // Load one integer register's worth from the stack slot. 461 SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr, 462 MachinePointerInfo(), 463 false, false, 0); 464 // Store it to the final location. Remember the store. 465 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr, 466 ST->getPointerInfo().getWithOffset(Offset), 467 ST->isVolatile(), ST->isNonTemporal(), 468 MinAlign(ST->getAlignment(), Offset))); 469 // Increment the pointers. 470 Offset += RegBytes; 471 StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr, 472 Increment); 473 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 474 } 475 476 // The last store may be partial. Do a truncating store. On big-endian 477 // machines this requires an extending load from the stack slot to ensure 478 // that the bits are in the right place. 479 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 480 8 * (StoredBytes - Offset)); 481 482 // Load from the stack slot. 483 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr, 484 MachinePointerInfo(), 485 MemVT, false, false, 0); 486 487 Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr, 488 ST->getPointerInfo() 489 .getWithOffset(Offset), 490 MemVT, ST->isVolatile(), 491 ST->isNonTemporal(), 492 MinAlign(ST->getAlignment(), Offset))); 493 // The order of the stores doesn't matter - say it with a TokenFactor. 494 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0], 495 Stores.size()); 496 } 497 assert(ST->getMemoryVT().isInteger() && 498 !ST->getMemoryVT().isVector() && 499 "Unaligned store of unknown type."); 500 // Get the half-size VT 501 EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext()); 502 int NumBits = NewStoredVT.getSizeInBits(); 503 int IncrementSize = NumBits / 8; 504 505 // Divide the stored value in two parts. 
506 SDValue ShiftAmount = DAG.getConstant(NumBits, 507 TLI.getShiftAmountTy(Val.getValueType())); 508 SDValue Lo = Val; 509 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount); 510 511 // Store the two parts 512 SDValue Store1, Store2; 513 Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr, 514 ST->getPointerInfo(), NewStoredVT, 515 ST->isVolatile(), ST->isNonTemporal(), Alignment); 516 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 517 DAG.getConstant(IncrementSize, TLI.getPointerTy())); 518 Alignment = MinAlign(Alignment, IncrementSize); 519 Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr, 520 ST->getPointerInfo().getWithOffset(IncrementSize), 521 NewStoredVT, ST->isVolatile(), ST->isNonTemporal(), 522 Alignment); 523 524 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2); 525} 526 527/// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads. 528static 529SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, 530 const TargetLowering &TLI) { 531 SDValue Chain = LD->getChain(); 532 SDValue Ptr = LD->getBasePtr(); 533 EVT VT = LD->getValueType(0); 534 EVT LoadedVT = LD->getMemoryVT(); 535 DebugLoc dl = LD->getDebugLoc(); 536 if (VT.isFloatingPoint() || VT.isVector()) { 537 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits()); 538 if (TLI.isTypeLegal(intVT)) { 539 // Expand to a (misaligned) integer load of the same size, 540 // then bitconvert to floating point or vector. 541 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(), 542 LD->isVolatile(), 543 LD->isNonTemporal(), LD->getAlignment()); 544 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad); 545 if (VT.isFloatingPoint() && LoadedVT != VT) 546 Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result); 547 548 SDValue Ops[] = { Result, Chain }; 549 return DAG.getMergeValues(Ops, 2, dl); 550 } 551 552 // Copy the value to a (aligned) stack slot using (unaligned) integer 553 // loads and stores, then do a (aligned) load from the stack slot. 554 EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT); 555 unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8; 556 unsigned RegBytes = RegVT.getSizeInBits() / 8; 557 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes; 558 559 // Make sure the stack slot is also aligned for the register type. 560 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT); 561 562 SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy()); 563 SmallVector<SDValue, 8> Stores; 564 SDValue StackPtr = StackBase; 565 unsigned Offset = 0; 566 567 // Do all but one copies using the full register width. 568 for (unsigned i = 1; i < NumRegs; i++) { 569 // Load one integer register's worth from the original location. 570 SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr, 571 LD->getPointerInfo().getWithOffset(Offset), 572 LD->isVolatile(), LD->isNonTemporal(), 573 MinAlign(LD->getAlignment(), Offset)); 574 // Follow the load with a store to the stack slot. Remember the store. 575 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr, 576 MachinePointerInfo(), false, false, 0)); 577 // Increment the pointers. 578 Offset += RegBytes; 579 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 580 StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr, 581 Increment); 582 } 583 584 // The last copy may be partial. Do an extending load. 
585 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 586 8 * (LoadedBytes - Offset)); 587 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr, 588 LD->getPointerInfo().getWithOffset(Offset), 589 MemVT, LD->isVolatile(), 590 LD->isNonTemporal(), 591 MinAlign(LD->getAlignment(), Offset)); 592 // Follow the load with a store to the stack slot. Remember the store. 593 // On big-endian machines this requires a truncating store to ensure 594 // that the bits end up in the right place. 595 Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr, 596 MachinePointerInfo(), MemVT, 597 false, false, 0)); 598 599 // The order of the stores doesn't matter - say it with a TokenFactor. 600 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0], 601 Stores.size()); 602 603 // Finally, perform the original load only redirected to the stack slot. 604 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase, 605 MachinePointerInfo(), LoadedVT, false, false, 0); 606 607 // Callers expect a MERGE_VALUES node. 608 SDValue Ops[] = { Load, TF }; 609 return DAG.getMergeValues(Ops, 2, dl); 610 } 611 assert(LoadedVT.isInteger() && !LoadedVT.isVector() && 612 "Unaligned load of unsupported type."); 613 614 // Compute the new VT that is half the size of the old one. This is an 615 // integer MVT. 616 unsigned NumBits = LoadedVT.getSizeInBits(); 617 EVT NewLoadedVT; 618 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2); 619 NumBits >>= 1; 620 621 unsigned Alignment = LD->getAlignment(); 622 unsigned IncrementSize = NumBits / 8; 623 ISD::LoadExtType HiExtType = LD->getExtensionType(); 624 625 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD. 626 if (HiExtType == ISD::NON_EXTLOAD) 627 HiExtType = ISD::ZEXTLOAD; 628 629 // Load the value in two parts 630 SDValue Lo, Hi; 631 if (TLI.isLittleEndian()) { 632 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(), 633 NewLoadedVT, LD->isVolatile(), 634 LD->isNonTemporal(), Alignment); 635 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 636 DAG.getConstant(IncrementSize, TLI.getPointerTy())); 637 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, 638 LD->getPointerInfo().getWithOffset(IncrementSize), 639 NewLoadedVT, LD->isVolatile(), 640 LD->isNonTemporal(), MinAlign(Alignment,IncrementSize)); 641 } else { 642 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(), 643 NewLoadedVT, LD->isVolatile(), 644 LD->isNonTemporal(), Alignment); 645 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 646 DAG.getConstant(IncrementSize, TLI.getPointerTy())); 647 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, 648 LD->getPointerInfo().getWithOffset(IncrementSize), 649 NewLoadedVT, LD->isVolatile(), 650 LD->isNonTemporal(), MinAlign(Alignment,IncrementSize)); 651 } 652 653 // aggregate the two parts 654 SDValue ShiftAmount = DAG.getConstant(NumBits, 655 TLI.getShiftAmountTy(Hi.getValueType())); 656 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount); 657 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo); 658 659 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 660 Hi.getValue(1)); 661 662 SDValue Ops[] = { Result, TF }; 663 return DAG.getMergeValues(Ops, 2, dl); 664} 665 666/// PerformInsertVectorEltInMemory - Some target cannot handle a variable 667/// insertion index for the INSERT_VECTOR_ELT instruction. 
In this case, it 668/// is necessary to spill the vector being inserted into to memory, perform 669/// the insert there, and then read the result back. 670SDValue SelectionDAGLegalize:: 671PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx, 672 DebugLoc dl) { 673 SDValue Tmp1 = Vec; 674 SDValue Tmp2 = Val; 675 SDValue Tmp3 = Idx; 676 677 // If the target doesn't support this, we have to spill the input vector 678 // to a temporary stack slot, update the element, then reload it. This is 679 // badness. We could also load the value into a vector register (either 680 // with a "move to register" or "extload into register" instruction, then 681 // permute it into place, if the idx is a constant and if the idx is 682 // supported by the target. 683 EVT VT = Tmp1.getValueType(); 684 EVT EltVT = VT.getVectorElementType(); 685 EVT IdxVT = Tmp3.getValueType(); 686 EVT PtrVT = TLI.getPointerTy(); 687 SDValue StackPtr = DAG.CreateStackTemporary(VT); 688 689 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 690 691 // Store the vector. 692 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr, 693 MachinePointerInfo::getFixedStack(SPFI), 694 false, false, 0); 695 696 // Truncate or zero extend offset to target pointer type. 697 unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND; 698 Tmp3 = DAG.getNode(CastOpc, dl, PtrVT, Tmp3); 699 // Add the offset to the index. 700 unsigned EltSize = EltVT.getSizeInBits()/8; 701 Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT)); 702 SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr); 703 // Store the scalar value. 704 Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, MachinePointerInfo(), EltVT, 705 false, false, 0); 706 // Load the updated vector. 707 return DAG.getLoad(VT, dl, Ch, StackPtr, 708 MachinePointerInfo::getFixedStack(SPFI), false, false, 0); 709} 710 711 712SDValue SelectionDAGLegalize:: 713ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, DebugLoc dl) { 714 if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) { 715 // SCALAR_TO_VECTOR requires that the type of the value being inserted 716 // match the element type of the vector being created, except for 717 // integers in which case the inserted value can be over width. 718 EVT EltVT = Vec.getValueType().getVectorElementType(); 719 if (Val.getValueType() == EltVT || 720 (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) { 721 SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 722 Vec.getValueType(), Val); 723 724 unsigned NumElts = Vec.getValueType().getVectorNumElements(); 725 // We generate a shuffle of InVec and ScVec, so the shuffle mask 726 // should be 0,1,2,3,4,5... with the appropriate element replaced with 727 // elt 0 of the RHS. 728 SmallVector<int, 8> ShufOps; 729 for (unsigned i = 0; i != NumElts; ++i) 730 ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts); 731 732 return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec, 733 &ShufOps[0]); 734 } 735 } 736 return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl); 737} 738 739SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) { 740 // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr' 741 // FIXME: We shouldn't do this for TargetConstantFP's. 742 // FIXME: move this to the DAG Combiner! Note that we can't regress due 743 // to phase ordering between legalized code and the dag combiner. 
This 744 // probably means that we need to integrate dag combiner and legalizer 745 // together. 746 // We generally can't do this one for long doubles. 747 SDValue Tmp1 = ST->getChain(); 748 SDValue Tmp2 = ST->getBasePtr(); 749 SDValue Tmp3; 750 unsigned Alignment = ST->getAlignment(); 751 bool isVolatile = ST->isVolatile(); 752 bool isNonTemporal = ST->isNonTemporal(); 753 DebugLoc dl = ST->getDebugLoc(); 754 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) { 755 if (CFP->getValueType(0) == MVT::f32 && 756 getTypeAction(MVT::i32) == Legal) { 757 Tmp3 = DAG.getConstant(CFP->getValueAPF(). 758 bitcastToAPInt().zextOrTrunc(32), 759 MVT::i32); 760 return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 761 isVolatile, isNonTemporal, Alignment); 762 } 763 764 if (CFP->getValueType(0) == MVT::f64) { 765 // If this target supports 64-bit registers, do a single 64-bit store. 766 if (getTypeAction(MVT::i64) == Legal) { 767 Tmp3 = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt(). 768 zextOrTrunc(64), MVT::i64); 769 return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 770 isVolatile, isNonTemporal, Alignment); 771 } 772 773 if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) { 774 // Otherwise, if the target supports 32-bit registers, use 2 32-bit 775 // stores. If the target supports neither 32- nor 64-bits, this 776 // xform is certainly not worth it. 777 const APInt &IntVal =CFP->getValueAPF().bitcastToAPInt(); 778 SDValue Lo = DAG.getConstant(IntVal.trunc(32), MVT::i32); 779 SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32); 780 if (TLI.isBigEndian()) std::swap(Lo, Hi); 781 782 Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getPointerInfo(), isVolatile, 783 isNonTemporal, Alignment); 784 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 785 DAG.getIntPtrConstant(4)); 786 Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2, 787 ST->getPointerInfo().getWithOffset(4), 788 isVolatile, isNonTemporal, MinAlign(Alignment, 4U)); 789 790 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi); 791 } 792 } 793 } 794 return SDValue(0, 0); 795} 796 797/// LegalizeOp - We know that the specified value has a legal type, and 798/// that its operands are legal. Now ensure that the operation itself 799/// is legal, recursively ensuring that the operands' operations remain 800/// legal. 801SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { 802 if (Op.getOpcode() == ISD::TargetConstant) // Allow illegal target nodes. 803 return Op; 804 805 SDNode *Node = Op.getNode(); 806 DebugLoc dl = Node->getDebugLoc(); 807 808 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) 809 assert(getTypeAction(Node->getValueType(i)) == Legal && 810 "Unexpected illegal type!"); 811 812 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) 813 assert((isTypeLegal(Node->getOperand(i).getValueType()) || 814 Node->getOperand(i).getOpcode() == ISD::TargetConstant) && 815 "Unexpected illegal type!"); 816 817 // Note that LegalizeOp may be reentered even from single-use nodes, which 818 // means that we always must cache transformed nodes. 
819 DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op); 820 if (I != LegalizedNodes.end()) return I->second; 821 822 SDValue Tmp1, Tmp2, Tmp3, Tmp4; 823 SDValue Result = Op; 824 bool isCustom = false; 825 826 // Figure out the correct action; the way to query this varies by opcode 827 TargetLowering::LegalizeAction Action = TargetLowering::Legal; 828 bool SimpleFinishLegalizing = true; 829 switch (Node->getOpcode()) { 830 case ISD::INTRINSIC_W_CHAIN: 831 case ISD::INTRINSIC_WO_CHAIN: 832 case ISD::INTRINSIC_VOID: 833 case ISD::VAARG: 834 case ISD::STACKSAVE: 835 Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other); 836 break; 837 case ISD::SINT_TO_FP: 838 case ISD::UINT_TO_FP: 839 case ISD::EXTRACT_VECTOR_ELT: 840 Action = TLI.getOperationAction(Node->getOpcode(), 841 Node->getOperand(0).getValueType()); 842 break; 843 case ISD::FP_ROUND_INREG: 844 case ISD::SIGN_EXTEND_INREG: { 845 EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT(); 846 Action = TLI.getOperationAction(Node->getOpcode(), InnerType); 847 break; 848 } 849 case ISD::SELECT_CC: 850 case ISD::SETCC: 851 case ISD::BR_CC: { 852 unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 : 853 Node->getOpcode() == ISD::SETCC ? 2 : 1; 854 unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 : 0; 855 EVT OpVT = Node->getOperand(CompareOperand).getValueType(); 856 ISD::CondCode CCCode = 857 cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get(); 858 Action = TLI.getCondCodeAction(CCCode, OpVT); 859 if (Action == TargetLowering::Legal) { 860 if (Node->getOpcode() == ISD::SELECT_CC) 861 Action = TLI.getOperationAction(Node->getOpcode(), 862 Node->getValueType(0)); 863 else 864 Action = TLI.getOperationAction(Node->getOpcode(), OpVT); 865 } 866 break; 867 } 868 case ISD::LOAD: 869 case ISD::STORE: 870 // FIXME: Model these properly. LOAD and STORE are complicated, and 871 // STORE expects the unlegalized operand in some cases. 872 SimpleFinishLegalizing = false; 873 break; 874 case ISD::CALLSEQ_START: 875 case ISD::CALLSEQ_END: 876 // FIXME: This shouldn't be necessary. These nodes have special properties 877 // dealing with the recursive nature of legalization. Removing this 878 // special case should be done as part of making LegalizeDAG non-recursive. 879 SimpleFinishLegalizing = false; 880 break; 881 case ISD::EXTRACT_ELEMENT: 882 case ISD::FLT_ROUNDS_: 883 case ISD::SADDO: 884 case ISD::SSUBO: 885 case ISD::UADDO: 886 case ISD::USUBO: 887 case ISD::SMULO: 888 case ISD::UMULO: 889 case ISD::FPOWI: 890 case ISD::MERGE_VALUES: 891 case ISD::EH_RETURN: 892 case ISD::FRAME_TO_ARGS_OFFSET: 893 case ISD::EH_SJLJ_SETJMP: 894 case ISD::EH_SJLJ_LONGJMP: 895 case ISD::EH_SJLJ_DISPATCHSETUP: 896 // These operations lie about being legal: when they claim to be legal, 897 // they should actually be expanded. 898 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); 899 if (Action == TargetLowering::Legal) 900 Action = TargetLowering::Expand; 901 break; 902 case ISD::TRAMPOLINE: 903 case ISD::FRAMEADDR: 904 case ISD::RETURNADDR: 905 // These operations lie about being legal: when they claim to be legal, 906 // they should actually be custom-lowered. 907 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); 908 if (Action == TargetLowering::Legal) 909 Action = TargetLowering::Custom; 910 break; 911 case ISD::BUILD_VECTOR: 912 // A weird case: legalization for BUILD_VECTOR never legalizes the 913 // operands! 914 // FIXME: This really sucks... 
changing it isn't semantically incorrect, 915 // but it massively pessimizes the code for floating-point BUILD_VECTORs 916 // because ConstantFP operands get legalized into constant pool loads 917 // before the BUILD_VECTOR code can see them. It doesn't usually bite, 918 // though, because BUILD_VECTORS usually get lowered into other nodes 919 // which get legalized properly. 920 SimpleFinishLegalizing = false; 921 break; 922 default: 923 if (Node->getOpcode() >= ISD::BUILTIN_OP_END) { 924 Action = TargetLowering::Legal; 925 } else { 926 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); 927 } 928 break; 929 } 930 931 if (SimpleFinishLegalizing) { 932 SmallVector<SDValue, 8> Ops, ResultVals; 933 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) 934 Ops.push_back(LegalizeOp(Node->getOperand(i))); 935 switch (Node->getOpcode()) { 936 default: break; 937 case ISD::BR: 938 case ISD::BRIND: 939 case ISD::BR_JT: 940 case ISD::BR_CC: 941 case ISD::BRCOND: 942 assert(LastCALLSEQ.size() == 1 && "branch inside CALLSEQ_BEGIN/END?"); 943 // Branches tweak the chain to include LastCALLSEQ 944 Ops[0] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ops[0], 945 getLastCALLSEQ()); 946 Ops[0] = LegalizeOp(Ops[0]); 947 setLastCALLSEQ(DAG.getEntryNode()); 948 break; 949 case ISD::SHL: 950 case ISD::SRL: 951 case ISD::SRA: 952 case ISD::ROTL: 953 case ISD::ROTR: 954 // Legalizing shifts/rotates requires adjusting the shift amount 955 // to the appropriate width. 956 if (!Ops[1].getValueType().isVector()) 957 Ops[1] = LegalizeOp(DAG.getShiftAmountOperand(Ops[0].getValueType(), 958 Ops[1])); 959 break; 960 case ISD::SRL_PARTS: 961 case ISD::SRA_PARTS: 962 case ISD::SHL_PARTS: 963 // Legalizing shifts/rotates requires adjusting the shift amount 964 // to the appropriate width. 965 if (!Ops[2].getValueType().isVector()) 966 Ops[2] = LegalizeOp(DAG.getShiftAmountOperand(Ops[0].getValueType(), 967 Ops[2])); 968 break; 969 } 970 971 Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), Ops.data(), 972 Ops.size()), 0); 973 switch (Action) { 974 case TargetLowering::Legal: 975 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) 976 ResultVals.push_back(Result.getValue(i)); 977 break; 978 case TargetLowering::Custom: 979 // FIXME: The handling for custom lowering with multiple results is 980 // a complete mess. 
981 Tmp1 = TLI.LowerOperation(Result, DAG); 982 if (Tmp1.getNode()) { 983 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) { 984 if (e == 1) 985 ResultVals.push_back(Tmp1); 986 else 987 ResultVals.push_back(Tmp1.getValue(i)); 988 } 989 break; 990 } 991 992 // FALL THROUGH 993 case TargetLowering::Expand: 994 ExpandNode(Result.getNode(), ResultVals); 995 break; 996 case TargetLowering::Promote: 997 PromoteNode(Result.getNode(), ResultVals); 998 break; 999 } 1000 if (!ResultVals.empty()) { 1001 for (unsigned i = 0, e = ResultVals.size(); i != e; ++i) { 1002 if (ResultVals[i] != SDValue(Node, i)) 1003 ResultVals[i] = LegalizeOp(ResultVals[i]); 1004 AddLegalizedOperand(SDValue(Node, i), ResultVals[i]); 1005 } 1006 return ResultVals[Op.getResNo()]; 1007 } 1008 } 1009 1010 switch (Node->getOpcode()) { 1011 default: 1012#ifndef NDEBUG 1013 dbgs() << "NODE: "; 1014 Node->dump( &DAG); 1015 dbgs() << "\n"; 1016#endif 1017 assert(0 && "Do not know how to legalize this operator!"); 1018 1019 case ISD::BUILD_VECTOR: 1020 switch (TLI.getOperationAction(ISD::BUILD_VECTOR, Node->getValueType(0))) { 1021 default: assert(0 && "This action is not supported yet!"); 1022 case TargetLowering::Custom: 1023 Tmp3 = TLI.LowerOperation(Result, DAG); 1024 if (Tmp3.getNode()) { 1025 Result = Tmp3; 1026 break; 1027 } 1028 // FALLTHROUGH 1029 case TargetLowering::Expand: 1030 Result = ExpandBUILD_VECTOR(Result.getNode()); 1031 break; 1032 } 1033 break; 1034 case ISD::CALLSEQ_START: { 1035 SDNode *CallEnd = FindCallEndFromCallStart(Node); 1036 assert(CallEnd && "didn't find CALLSEQ_END!"); 1037 1038 // Recursively Legalize all of the inputs of the call end that do not lead 1039 // to this call start. This ensures that any libcalls that need be inserted 1040 // are inserted *before* the CALLSEQ_START. 1041 {SmallPtrSet<SDNode*, 32> NodesLeadingTo; 1042 for (unsigned i = 0, e = CallEnd->getNumOperands(); i != e; ++i) 1043 LegalizeAllNodesNotLeadingTo(CallEnd->getOperand(i).getNode(), Node, 1044 NodesLeadingTo); 1045 } 1046 1047 // Now that we have legalized all of the inputs (which may have inserted 1048 // libcalls), create the new CALLSEQ_START node. 1049 Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the chain. 1050 1051 // Merge in the last call to ensure that this call starts after the last 1052 // call ended. 1053 if (getLastCALLSEQ().getOpcode() != ISD::EntryToken) { 1054 Tmp1 = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1055 Tmp1, getLastCALLSEQ()); 1056 Tmp1 = LegalizeOp(Tmp1); 1057 } 1058 1059 // Do not try to legalize the target-specific arguments (#1+). 1060 if (Tmp1 != Node->getOperand(0)) { 1061 SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end()); 1062 Ops[0] = Tmp1; 1063 Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), &Ops[0], 1064 Ops.size()), Result.getResNo()); 1065 } 1066 1067 // Remember that the CALLSEQ_START is legalized. 1068 AddLegalizedOperand(Op.getValue(0), Result); 1069 if (Node->getNumValues() == 2) // If this has a flag result, remember it. 1070 AddLegalizedOperand(Op.getValue(1), Result.getValue(1)); 1071 1072 // Now that the callseq_start and all of the non-call nodes above this call 1073 // sequence have been legalized, legalize the call itself. During this 1074 // process, no libcalls can/will be inserted, guaranteeing that no calls 1075 // can overlap. 1076 // Note that we are selecting this call! 1077 setLastCALLSEQ(SDValue(CallEnd, 0)); 1078 1079 // Legalize the call, starting from the CALLSEQ_END. 
1080 LegalizeOp(getLastCALLSEQ()); 1081 return Result; 1082 } 1083 case ISD::CALLSEQ_END: 1084 { 1085 SDNode *myCALLSEQ_BEGIN = FindCallStartFromCallEnd(Node); 1086 1087 // If the CALLSEQ_START node hasn't been legalized first, legalize it. 1088 // This will cause this node to be legalized as well as handling libcalls 1089 // right. 1090 if (getLastCALLSEQ().getNode() != Node) { 1091 LegalizeOp(SDValue(myCALLSEQ_BEGIN, 0)); 1092 DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op); 1093 assert(I != LegalizedNodes.end() && 1094 "Legalizing the call start should have legalized this node!"); 1095 return I->second; 1096 } 1097 1098 pushLastCALLSEQ(SDValue(myCALLSEQ_BEGIN, 0)); 1099 } 1100 1101 // Otherwise, the call start has been legalized and everything is going 1102 // according to plan. Just legalize ourselves normally here. 1103 Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the chain. 1104 // Do not try to legalize the target-specific arguments (#1+), except for 1105 // an optional flag input. 1106 if (Node->getOperand(Node->getNumOperands()-1).getValueType() != MVT::Glue){ 1107 if (Tmp1 != Node->getOperand(0)) { 1108 SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end()); 1109 Ops[0] = Tmp1; 1110 Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), 1111 &Ops[0], Ops.size()), 1112 Result.getResNo()); 1113 } 1114 } else { 1115 Tmp2 = LegalizeOp(Node->getOperand(Node->getNumOperands()-1)); 1116 if (Tmp1 != Node->getOperand(0) || 1117 Tmp2 != Node->getOperand(Node->getNumOperands()-1)) { 1118 SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end()); 1119 Ops[0] = Tmp1; 1120 Ops.back() = Tmp2; 1121 Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), 1122 &Ops[0], Ops.size()), 1123 Result.getResNo()); 1124 } 1125 } 1126 // This finishes up call legalization. 1127 popLastCALLSEQ(); 1128 1129 // If the CALLSEQ_END node has a flag, remember that we legalized it. 1130 AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0)); 1131 if (Node->getNumValues() == 2) 1132 AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1)); 1133 return Result.getValue(Op.getResNo()); 1134 case ISD::LOAD: { 1135 LoadSDNode *LD = cast<LoadSDNode>(Node); 1136 Tmp1 = LegalizeOp(LD->getChain()); // Legalize the chain. 1137 Tmp2 = LegalizeOp(LD->getBasePtr()); // Legalize the base pointer. 1138 1139 ISD::LoadExtType ExtType = LD->getExtensionType(); 1140 if (ExtType == ISD::NON_EXTLOAD) { 1141 EVT VT = Node->getValueType(0); 1142 Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), 1143 Tmp1, Tmp2, LD->getOffset()), 1144 Result.getResNo()); 1145 Tmp3 = Result.getValue(0); 1146 Tmp4 = Result.getValue(1); 1147 1148 switch (TLI.getOperationAction(Node->getOpcode(), VT)) { 1149 default: assert(0 && "This action is not supported yet!"); 1150 case TargetLowering::Legal: 1151 // If this is an unaligned load and the target doesn't support it, 1152 // expand it. 
1153 if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { 1154 const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1155 unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty); 1156 if (LD->getAlignment() < ABIAlignment){ 1157 Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()), 1158 DAG, TLI); 1159 Tmp3 = Result.getOperand(0); 1160 Tmp4 = Result.getOperand(1); 1161 Tmp3 = LegalizeOp(Tmp3); 1162 Tmp4 = LegalizeOp(Tmp4); 1163 } 1164 } 1165 break; 1166 case TargetLowering::Custom: 1167 Tmp1 = TLI.LowerOperation(Tmp3, DAG); 1168 if (Tmp1.getNode()) { 1169 Tmp3 = LegalizeOp(Tmp1); 1170 Tmp4 = LegalizeOp(Tmp1.getValue(1)); 1171 } 1172 break; 1173 case TargetLowering::Promote: { 1174 // Only promote a load of vector type to another. 1175 assert(VT.isVector() && "Cannot promote this load!"); 1176 // Change base type to a different vector type. 1177 EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT); 1178 1179 Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getPointerInfo(), 1180 LD->isVolatile(), LD->isNonTemporal(), 1181 LD->getAlignment()); 1182 Tmp3 = LegalizeOp(DAG.getNode(ISD::BITCAST, dl, VT, Tmp1)); 1183 Tmp4 = LegalizeOp(Tmp1.getValue(1)); 1184 break; 1185 } 1186 } 1187 // Since loads produce two values, make sure to remember that we 1188 // legalized both of them. 1189 AddLegalizedOperand(SDValue(Node, 0), Tmp3); 1190 AddLegalizedOperand(SDValue(Node, 1), Tmp4); 1191 return Op.getResNo() ? Tmp4 : Tmp3; 1192 } 1193 1194 EVT SrcVT = LD->getMemoryVT(); 1195 unsigned SrcWidth = SrcVT.getSizeInBits(); 1196 unsigned Alignment = LD->getAlignment(); 1197 bool isVolatile = LD->isVolatile(); 1198 bool isNonTemporal = LD->isNonTemporal(); 1199 1200 if (SrcWidth != SrcVT.getStoreSizeInBits() && 1201 // Some targets pretend to have an i1 loading operation, and actually 1202 // load an i8. This trick is correct for ZEXTLOAD because the top 7 1203 // bits are guaranteed to be zero; it helps the optimizers understand 1204 // that these bits are zero. It is also useful for EXTLOAD, since it 1205 // tells the optimizers that those bits are undefined. It would be 1206 // nice to have an effective generic way of getting these benefits... 1207 // Until such a way is found, don't insist on promoting i1 here. 1208 (SrcVT != MVT::i1 || 1209 TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) { 1210 // Promote to a byte-sized load if not loading an integral number of 1211 // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24. 1212 unsigned NewWidth = SrcVT.getStoreSizeInBits(); 1213 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth); 1214 SDValue Ch; 1215 1216 // The extra bits are guaranteed to be zero, since we stored them that 1217 // way. A zext load from NVT thus automatically gives zext from SrcVT. 1218 1219 ISD::LoadExtType NewExtType = 1220 ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD; 1221 1222 Result = DAG.getExtLoad(NewExtType, dl, Node->getValueType(0), 1223 Tmp1, Tmp2, LD->getPointerInfo(), 1224 NVT, isVolatile, isNonTemporal, Alignment); 1225 1226 Ch = Result.getValue(1); // The chain. 1227 1228 if (ExtType == ISD::SEXTLOAD) 1229 // Having the top bits zero doesn't help when sign extending. 1230 Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, 1231 Result.getValueType(), 1232 Result, DAG.getValueType(SrcVT)); 1233 else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType()) 1234 // All the top bits are guaranteed to be zero - inform the optimizers. 
1235 Result = DAG.getNode(ISD::AssertZext, dl, 1236 Result.getValueType(), Result, 1237 DAG.getValueType(SrcVT)); 1238 1239 Tmp1 = LegalizeOp(Result); 1240 Tmp2 = LegalizeOp(Ch); 1241 } else if (SrcWidth & (SrcWidth - 1)) { 1242 // If not loading a power-of-2 number of bits, expand as two loads. 1243 assert(!SrcVT.isVector() && "Unsupported extload!"); 1244 unsigned RoundWidth = 1 << Log2_32(SrcWidth); 1245 assert(RoundWidth < SrcWidth); 1246 unsigned ExtraWidth = SrcWidth - RoundWidth; 1247 assert(ExtraWidth < RoundWidth); 1248 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && 1249 "Load size not an integral number of bytes!"); 1250 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth); 1251 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth); 1252 SDValue Lo, Hi, Ch; 1253 unsigned IncrementSize; 1254 1255 if (TLI.isLittleEndian()) { 1256 // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16) 1257 // Load the bottom RoundWidth bits. 1258 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0), 1259 Tmp1, Tmp2, 1260 LD->getPointerInfo(), RoundVT, isVolatile, 1261 isNonTemporal, Alignment); 1262 1263 // Load the remaining ExtraWidth bits. 1264 IncrementSize = RoundWidth / 8; 1265 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1266 DAG.getIntPtrConstant(IncrementSize)); 1267 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2, 1268 LD->getPointerInfo().getWithOffset(IncrementSize), 1269 ExtraVT, isVolatile, isNonTemporal, 1270 MinAlign(Alignment, IncrementSize)); 1271 1272 // Build a factor node to remember that this load is independent of 1273 // the other one. 1274 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 1275 Hi.getValue(1)); 1276 1277 // Move the top bits to the right place. 1278 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, 1279 DAG.getConstant(RoundWidth, 1280 TLI.getShiftAmountTy(Hi.getValueType()))); 1281 1282 // Join the hi and lo parts. 1283 Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); 1284 } else { 1285 // Big endian - avoid unaligned loads. 1286 // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8 1287 // Load the top RoundWidth bits. 1288 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2, 1289 LD->getPointerInfo(), RoundVT, isVolatile, 1290 isNonTemporal, Alignment); 1291 1292 // Load the remaining ExtraWidth bits. 1293 IncrementSize = RoundWidth / 8; 1294 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1295 DAG.getIntPtrConstant(IncrementSize)); 1296 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, 1297 dl, Node->getValueType(0), Tmp1, Tmp2, 1298 LD->getPointerInfo().getWithOffset(IncrementSize), 1299 ExtraVT, isVolatile, isNonTemporal, 1300 MinAlign(Alignment, IncrementSize)); 1301 1302 // Build a factor node to remember that this load is independent of 1303 // the other one. 1304 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 1305 Hi.getValue(1)); 1306 1307 // Move the top bits to the right place. 1308 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, 1309 DAG.getConstant(ExtraWidth, 1310 TLI.getShiftAmountTy(Hi.getValueType()))); 1311 1312 // Join the hi and lo parts. 
1313 Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); 1314 } 1315 1316 Tmp1 = LegalizeOp(Result); 1317 Tmp2 = LegalizeOp(Ch); 1318 } else { 1319 switch (TLI.getLoadExtAction(ExtType, SrcVT)) { 1320 default: assert(0 && "This action is not supported yet!"); 1321 case TargetLowering::Custom: 1322 isCustom = true; 1323 // FALLTHROUGH 1324 case TargetLowering::Legal: 1325 Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), 1326 Tmp1, Tmp2, LD->getOffset()), 1327 Result.getResNo()); 1328 Tmp1 = Result.getValue(0); 1329 Tmp2 = Result.getValue(1); 1330 1331 if (isCustom) { 1332 Tmp3 = TLI.LowerOperation(Result, DAG); 1333 if (Tmp3.getNode()) { 1334 Tmp1 = LegalizeOp(Tmp3); 1335 Tmp2 = LegalizeOp(Tmp3.getValue(1)); 1336 } 1337 } else { 1338 // If this is an unaligned load and the target doesn't support it, 1339 // expand it. 1340 if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { 1341 const Type *Ty = 1342 LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1343 unsigned ABIAlignment = 1344 TLI.getTargetData()->getABITypeAlignment(Ty); 1345 if (LD->getAlignment() < ABIAlignment){ 1346 Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()), 1347 DAG, TLI); 1348 Tmp1 = Result.getOperand(0); 1349 Tmp2 = Result.getOperand(1); 1350 Tmp1 = LegalizeOp(Tmp1); 1351 Tmp2 = LegalizeOp(Tmp2); 1352 } 1353 } 1354 } 1355 break; 1356 case TargetLowering::Expand: 1357 if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && isTypeLegal(SrcVT)) { 1358 SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2, 1359 LD->getPointerInfo(), 1360 LD->isVolatile(), LD->isNonTemporal(), 1361 LD->getAlignment()); 1362 unsigned ExtendOp; 1363 switch (ExtType) { 1364 case ISD::EXTLOAD: 1365 ExtendOp = (SrcVT.isFloatingPoint() ? 1366 ISD::FP_EXTEND : ISD::ANY_EXTEND); 1367 break; 1368 case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break; 1369 case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break; 1370 default: llvm_unreachable("Unexpected extend load type!"); 1371 } 1372 Result = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load); 1373 Tmp1 = LegalizeOp(Result); // Relegalize new nodes. 1374 Tmp2 = LegalizeOp(Load.getValue(1)); 1375 break; 1376 } 1377 // FIXME: This does not work for vectors on most targets. Sign- and 1378 // zero-extend operations are currently folded into extending loads, 1379 // whether they are legal or not, and then we end up here without any 1380 // support for legalizing them. 1381 assert(ExtType != ISD::EXTLOAD && 1382 "EXTLOAD should always be supported!"); 1383 // Turn the unsupported load into an EXTLOAD followed by an explicit 1384 // zero/sign extend inreg. 1385 Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0), 1386 Tmp1, Tmp2, LD->getPointerInfo(), SrcVT, 1387 LD->isVolatile(), LD->isNonTemporal(), 1388 LD->getAlignment()); 1389 SDValue ValRes; 1390 if (ExtType == ISD::SEXTLOAD) 1391 ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, 1392 Result.getValueType(), 1393 Result, DAG.getValueType(SrcVT)); 1394 else 1395 ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType()); 1396 Tmp1 = LegalizeOp(ValRes); // Relegalize new nodes. 1397 Tmp2 = LegalizeOp(Result.getValue(1)); // Relegalize new nodes. 1398 break; 1399 } 1400 } 1401 1402 // Since loads produce two values, make sure to remember that we legalized 1403 // both of them. 1404 AddLegalizedOperand(SDValue(Node, 0), Tmp1); 1405 AddLegalizedOperand(SDValue(Node, 1), Tmp2); 1406 return Op.getResNo() ? 
Tmp2 : Tmp1; 1407 } 1408 case ISD::STORE: { 1409 StoreSDNode *ST = cast<StoreSDNode>(Node); 1410 Tmp1 = LegalizeOp(ST->getChain()); // Legalize the chain. 1411 Tmp2 = LegalizeOp(ST->getBasePtr()); // Legalize the pointer. 1412 unsigned Alignment = ST->getAlignment(); 1413 bool isVolatile = ST->isVolatile(); 1414 bool isNonTemporal = ST->isNonTemporal(); 1415 1416 if (!ST->isTruncatingStore()) { 1417 if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) { 1418 Result = SDValue(OptStore, 0); 1419 break; 1420 } 1421 1422 { 1423 Tmp3 = LegalizeOp(ST->getValue()); 1424 Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), 1425 Tmp1, Tmp3, Tmp2, 1426 ST->getOffset()), 1427 Result.getResNo()); 1428 1429 EVT VT = Tmp3.getValueType(); 1430 switch (TLI.getOperationAction(ISD::STORE, VT)) { 1431 default: assert(0 && "This action is not supported yet!"); 1432 case TargetLowering::Legal: 1433 // If this is an unaligned store and the target doesn't support it, 1434 // expand it. 1435 if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { 1436 const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1437 unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty); 1438 if (ST->getAlignment() < ABIAlignment) 1439 Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()), 1440 DAG, TLI); 1441 } 1442 break; 1443 case TargetLowering::Custom: 1444 Tmp1 = TLI.LowerOperation(Result, DAG); 1445 if (Tmp1.getNode()) Result = Tmp1; 1446 break; 1447 case TargetLowering::Promote: 1448 assert(VT.isVector() && "Unknown legal promote case!"); 1449 Tmp3 = DAG.getNode(ISD::BITCAST, dl, 1450 TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3); 1451 Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, 1452 ST->getPointerInfo(), isVolatile, 1453 isNonTemporal, Alignment); 1454 break; 1455 } 1456 break; 1457 } 1458 } else { 1459 Tmp3 = LegalizeOp(ST->getValue()); 1460 1461 EVT StVT = ST->getMemoryVT(); 1462 unsigned StWidth = StVT.getSizeInBits(); 1463 1464 if (StWidth != StVT.getStoreSizeInBits()) { 1465 // Promote to a byte-sized store with upper bits zero if not 1466 // storing an integral number of bytes. For example, promote 1467 // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1) 1468 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), 1469 StVT.getStoreSizeInBits()); 1470 Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT); 1471 Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 1472 NVT, isVolatile, isNonTemporal, Alignment); 1473 } else if (StWidth & (StWidth - 1)) { 1474 // If not storing a power-of-2 number of bits, expand as two stores. 1475 assert(!StVT.isVector() && "Unsupported truncstore!"); 1476 unsigned RoundWidth = 1 << Log2_32(StWidth); 1477 assert(RoundWidth < StWidth); 1478 unsigned ExtraWidth = StWidth - RoundWidth; 1479 assert(ExtraWidth < RoundWidth); 1480 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && 1481 "Store size not an integral number of bytes!"); 1482 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth); 1483 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth); 1484 SDValue Lo, Hi; 1485 unsigned IncrementSize; 1486 1487 if (TLI.isLittleEndian()) { 1488 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16) 1489 // Store the bottom RoundWidth bits. 1490 Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 1491 RoundVT, 1492 isVolatile, isNonTemporal, Alignment); 1493 1494 // Store the remaining ExtraWidth bits. 
1495 IncrementSize = RoundWidth / 8; 1496 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1497 DAG.getIntPtrConstant(IncrementSize)); 1498 Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3, 1499 DAG.getConstant(RoundWidth, 1500 TLI.getShiftAmountTy(Tmp3.getValueType()))); 1501 Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, 1502 ST->getPointerInfo().getWithOffset(IncrementSize), 1503 ExtraVT, isVolatile, isNonTemporal, 1504 MinAlign(Alignment, IncrementSize)); 1505 } else { 1506 // Big endian - avoid unaligned stores. 1507 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X 1508 // Store the top RoundWidth bits. 1509 Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3, 1510 DAG.getConstant(ExtraWidth, 1511 TLI.getShiftAmountTy(Tmp3.getValueType()))); 1512 Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getPointerInfo(), 1513 RoundVT, isVolatile, isNonTemporal, Alignment); 1514 1515 // Store the remaining ExtraWidth bits. 1516 IncrementSize = RoundWidth / 8; 1517 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1518 DAG.getIntPtrConstant(IncrementSize)); 1519 Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, 1520 ST->getPointerInfo().getWithOffset(IncrementSize), 1521 ExtraVT, isVolatile, isNonTemporal, 1522 MinAlign(Alignment, IncrementSize)); 1523 } 1524 1525 // The order of the stores doesn't matter. 1526 Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi); 1527 } else { 1528 if (Tmp1 != ST->getChain() || Tmp3 != ST->getValue() || 1529 Tmp2 != ST->getBasePtr()) 1530 Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), 1531 Tmp1, Tmp3, Tmp2, 1532 ST->getOffset()), 1533 Result.getResNo()); 1534 1535 switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) { 1536 default: assert(0 && "This action is not supported yet!"); 1537 case TargetLowering::Legal: 1538 // If this is an unaligned store and the target doesn't support it, 1539 // expand it. 1540 if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { 1541 const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1542 unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty); 1543 if (ST->getAlignment() < ABIAlignment) 1544 Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()), 1545 DAG, TLI); 1546 } 1547 break; 1548 case TargetLowering::Custom: 1549 Result = TLI.LowerOperation(Result, DAG); 1550 break; 1551 case Expand: 1552 // TRUNCSTORE:i16 i32 -> STORE i16 1553 assert(isTypeLegal(StVT) && "Do not know how to expand this store!"); 1554 Tmp3 = DAG.getNode(ISD::TRUNCATE, dl, StVT, Tmp3); 1555 Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 1556 isVolatile, isNonTemporal, Alignment); 1557 break; 1558 } 1559 } 1560 } 1561 break; 1562 } 1563 } 1564 assert(Result.getValueType() == Op.getValueType() && 1565 "Bad legalization!"); 1566 1567 // Make sure that the generated code is itself legal. 1568 if (Result != Op) 1569 Result = LegalizeOp(Result); 1570 1571 // Note that LegalizeOp may be reentered even from single-use nodes, which 1572 // means that we always must cache transformed nodes. 1573 AddLegalizedOperand(Op, Result); 1574 return Result; 1575} 1576 1577SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) { 1578 SDValue Vec = Op.getOperand(0); 1579 SDValue Idx = Op.getOperand(1); 1580 DebugLoc dl = Op.getDebugLoc(); 1581 // Store the value to a temporary stack slot, then LOAD the returned part. 
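  // Illustrative sketch (not the actual lowering): for a v4i32 vector and a
  // variable index Idx, the expansion below behaves roughly like
  //   int32_t Slot[4];
  //   memcpy(Slot, &Vec, sizeof(Slot));   // spill the whole vector
  //   return Slot[Idx];                   // load back from Slot + Idx * 4 bytes
  // The index is scaled by the element size in bytes and zero-extended or
  // truncated to pointer width before being added to the slot address.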
1582 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); 1583 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, 1584 MachinePointerInfo(), false, false, 0); 1585 1586 // Add the offset to the index. 1587 unsigned EltSize = 1588 Vec.getValueType().getVectorElementType().getSizeInBits()/8; 1589 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, 1590 DAG.getConstant(EltSize, Idx.getValueType())); 1591 1592 if (Idx.getValueType().bitsGT(TLI.getPointerTy())) 1593 Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx); 1594 else 1595 Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx); 1596 1597 StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr); 1598 1599 if (Op.getValueType().isVector()) 1600 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr,MachinePointerInfo(), 1601 false, false, 0); 1602 return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr, 1603 MachinePointerInfo(), 1604 Vec.getValueType().getVectorElementType(), 1605 false, false, 0); 1606} 1607 1608SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) { 1609 assert(Op.getValueType().isVector() && "Non-vector insert subvector!"); 1610 1611 SDValue Vec = Op.getOperand(0); 1612 SDValue Part = Op.getOperand(1); 1613 SDValue Idx = Op.getOperand(2); 1614 DebugLoc dl = Op.getDebugLoc(); 1615 1616 // Store the value to a temporary stack slot, then LOAD the returned part. 1617 1618 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); 1619 int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 1620 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); 1621 1622 // First store the whole vector. 1623 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo, 1624 false, false, 0); 1625 1626 // Then store the inserted part. 1627 1628 // Add the offset to the index. 1629 unsigned EltSize = 1630 Vec.getValueType().getVectorElementType().getSizeInBits()/8; 1631 1632 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, 1633 DAG.getConstant(EltSize, Idx.getValueType())); 1634 1635 if (Idx.getValueType().bitsGT(TLI.getPointerTy())) 1636 Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx); 1637 else 1638 Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx); 1639 1640 SDValue SubStackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, 1641 StackPtr); 1642 1643 // Store the subvector. 1644 Ch = DAG.getStore(DAG.getEntryNode(), dl, Part, SubStackPtr, 1645 MachinePointerInfo(), false, false, 0); 1646 1647 // Finally, load the updated vector. 1648 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo, 1649 false, false, 0); 1650} 1651 1652SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) { 1653 // We can't handle this case efficiently. Allocate a sufficiently 1654 // aligned object on the stack, store each element into it, then load 1655 // the result as a vector. 1656 // Create the stack frame object. 1657 EVT VT = Node->getValueType(0); 1658 EVT EltVT = VT.getVectorElementType(); 1659 DebugLoc dl = Node->getDebugLoc(); 1660 SDValue FIPtr = DAG.CreateStackTemporary(VT); 1661 int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex(); 1662 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); 1663 1664 // Emit a store of each element to the stack slot. 1665 SmallVector<SDValue, 8> Stores; 1666 unsigned TypeByteSize = EltVT.getSizeInBits() / 8; 1667 // Store (in the right endianness) the elements to memory. 
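  // Element i lands at byte offset i * TypeByteSize from the slot base; undef
  // operands are skipped, and a truncating store is used when an operand was
  // promoted to a scalar type wider than EltVT (e.g. i8 elements that are
  // being carried around as i32 values).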
1668 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1669 // Ignore undef elements. 1670 if (Node->getOperand(i).getOpcode() == ISD::UNDEF) continue; 1671 1672 unsigned Offset = TypeByteSize*i; 1673 1674 SDValue Idx = DAG.getConstant(Offset, FIPtr.getValueType()); 1675 Idx = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, Idx); 1676 1677 // If the destination vector element type is narrower than the source 1678 // element type, only store the bits necessary. 1679 if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) { 1680 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, 1681 Node->getOperand(i), Idx, 1682 PtrInfo.getWithOffset(Offset), 1683 EltVT, false, false, 0)); 1684 } else 1685 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, 1686 Node->getOperand(i), Idx, 1687 PtrInfo.getWithOffset(Offset), 1688 false, false, 0)); 1689 } 1690 1691 SDValue StoreChain; 1692 if (!Stores.empty()) // Not all undef elements? 1693 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1694 &Stores[0], Stores.size()); 1695 else 1696 StoreChain = DAG.getEntryNode(); 1697 1698 // Result is a load from the stack slot. 1699 return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo, false, false, 0); 1700} 1701 1702SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) { 1703 DebugLoc dl = Node->getDebugLoc(); 1704 SDValue Tmp1 = Node->getOperand(0); 1705 SDValue Tmp2 = Node->getOperand(1); 1706 1707 // Get the sign bit of the RHS. First obtain a value that has the same 1708 // sign as the sign bit, i.e. negative if and only if the sign bit is 1. 1709 SDValue SignBit; 1710 EVT FloatVT = Tmp2.getValueType(); 1711 EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits()); 1712 if (isTypeLegal(IVT)) { 1713 // Convert to an integer with the same sign bit. 1714 SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2); 1715 } else { 1716 // Store the float to memory, then load the sign part out as an integer. 1717 MVT LoadTy = TLI.getPointerTy(); 1718 // First create a temporary that is aligned for both the load and store. 1719 SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy); 1720 // Then store the float to it. 1721 SDValue Ch = 1722 DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, MachinePointerInfo(), 1723 false, false, 0); 1724 if (TLI.isBigEndian()) { 1725 assert(FloatVT.isByteSized() && "Unsupported floating point type!"); 1726 // Load out a legal integer with the same sign bit as the float. 1727 SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(), 1728 false, false, 0); 1729 } else { // Little endian 1730 SDValue LoadPtr = StackPtr; 1731 // The float may be wider than the integer we are going to load. Advance 1732 // the pointer so that the loaded integer will contain the sign bit. 1733 unsigned Strides = (FloatVT.getSizeInBits()-1)/LoadTy.getSizeInBits(); 1734 unsigned ByteOffset = (Strides * LoadTy.getSizeInBits()) / 8; 1735 LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(), 1736 LoadPtr, DAG.getIntPtrConstant(ByteOffset)); 1737 // Load a legal integer containing the sign bit. 1738 SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, MachinePointerInfo(), 1739 false, false, 0); 1740 // Move the sign bit to the top bit of the loaded integer. 
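      // For example, with FloatVT = f64 and a 32-bit pointer type, Strides is
      // (64 - 1) / 32 = 1 and ByteOffset is 4, so the i32 holding the sign bit
      // was loaded from StackPtr + 4; BitShift below is then 32 - (64 - 32) = 0
      // and no extra shift is emitted.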
1741 unsigned BitShift = LoadTy.getSizeInBits() - 1742 (FloatVT.getSizeInBits() - 8 * ByteOffset); 1743 assert(BitShift < LoadTy.getSizeInBits() && "Pointer advanced wrong?"); 1744 if (BitShift) 1745 SignBit = DAG.getNode(ISD::SHL, dl, LoadTy, SignBit, 1746 DAG.getConstant(BitShift, 1747 TLI.getShiftAmountTy(SignBit.getValueType()))); 1748 } 1749 } 1750 // Now get the sign bit proper, by seeing whether the value is negative. 1751 SignBit = DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()), 1752 SignBit, DAG.getConstant(0, SignBit.getValueType()), 1753 ISD::SETLT); 1754 // Get the absolute value of the result. 1755 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1); 1756 // Select between the nabs and abs value based on the sign bit of 1757 // the input. 1758 return DAG.getNode(ISD::SELECT, dl, AbsVal.getValueType(), SignBit, 1759 DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal), 1760 AbsVal); 1761} 1762 1763void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node, 1764 SmallVectorImpl<SDValue> &Results) { 1765 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore(); 1766 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and" 1767 " not tell us which reg is the stack pointer!"); 1768 DebugLoc dl = Node->getDebugLoc(); 1769 EVT VT = Node->getValueType(0); 1770 SDValue Tmp1 = SDValue(Node, 0); 1771 SDValue Tmp2 = SDValue(Node, 1); 1772 SDValue Tmp3 = Node->getOperand(2); 1773 SDValue Chain = Tmp1.getOperand(0); 1774 1775 // Chain the dynamic stack allocation so that it doesn't modify the stack 1776 // pointer when other instructions are using the stack. 1777 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true)); 1778 1779 SDValue Size = Tmp2.getOperand(1); 1780 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); 1781 Chain = SP.getValue(1); 1782 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue(); 1783 unsigned StackAlign = TM.getFrameLowering()->getStackAlignment(); 1784 if (Align > StackAlign) 1785 SP = DAG.getNode(ISD::AND, dl, VT, SP, 1786 DAG.getConstant(-(uint64_t)Align, VT)); 1787 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value 1788 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain 1789 1790 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true), 1791 DAG.getIntPtrConstant(0, true), SDValue()); 1792 1793 Results.push_back(Tmp1); 1794 Results.push_back(Tmp2); 1795} 1796 1797/// LegalizeSetCCCondCode - Legalize a SETCC with given LHS and RHS and 1798/// condition code CC on the current target. This routine expands SETCC with 1799/// illegal condition code into AND / OR of multiple SETCC values. 1800void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, 1801 SDValue &LHS, SDValue &RHS, 1802 SDValue &CC, 1803 DebugLoc dl) { 1804 EVT OpVT = LHS.getValueType(); 1805 ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get(); 1806 switch (TLI.getCondCodeAction(CCCode, OpVT)) { 1807 default: assert(0 && "Unknown condition code action!"); 1808 case TargetLowering::Legal: 1809 // Nothing to do. 
1810 break; 1811 case TargetLowering::Expand: { 1812 ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID; 1813 unsigned Opc = 0; 1814 switch (CCCode) { 1815 default: assert(0 && "Don't know how to expand this condition!"); 1816 case ISD::SETOEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETO; Opc = ISD::AND; break; 1817 case ISD::SETOGT: CC1 = ISD::SETGT; CC2 = ISD::SETO; Opc = ISD::AND; break; 1818 case ISD::SETOGE: CC1 = ISD::SETGE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1819 case ISD::SETOLT: CC1 = ISD::SETLT; CC2 = ISD::SETO; Opc = ISD::AND; break; 1820 case ISD::SETOLE: CC1 = ISD::SETLE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1821 case ISD::SETONE: CC1 = ISD::SETNE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1822 case ISD::SETUEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1823 case ISD::SETUGT: CC1 = ISD::SETGT; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1824 case ISD::SETUGE: CC1 = ISD::SETGE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1825 case ISD::SETULT: CC1 = ISD::SETLT; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1826 case ISD::SETULE: CC1 = ISD::SETLE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1827 case ISD::SETUNE: CC1 = ISD::SETNE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1828 // FIXME: Implement more expansions. 1829 } 1830 1831 SDValue SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1); 1832 SDValue SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2); 1833 LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2); 1834 RHS = SDValue(); 1835 CC = SDValue(); 1836 break; 1837 } 1838 } 1839} 1840 1841/// EmitStackConvert - Emit a store/load combination to the stack. This stores 1842/// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does 1843/// a load from the stack slot to DestVT, extending it if needed. 1844/// The resultant code need not be legal. 1845SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, 1846 EVT SlotVT, 1847 EVT DestVT, 1848 DebugLoc dl) { 1849 // Create the stack frame object. 1850 unsigned SrcAlign = 1851 TLI.getTargetData()->getPrefTypeAlignment(SrcOp.getValueType(). 1852 getTypeForEVT(*DAG.getContext())); 1853 SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign); 1854 1855 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr); 1856 int SPFI = StackPtrFI->getIndex(); 1857 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI); 1858 1859 unsigned SrcSize = SrcOp.getValueType().getSizeInBits(); 1860 unsigned SlotSize = SlotVT.getSizeInBits(); 1861 unsigned DestSize = DestVT.getSizeInBits(); 1862 const Type *DestType = DestVT.getTypeForEVT(*DAG.getContext()); 1863 unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(DestType); 1864 1865 // Emit a store to the stack slot. Use a truncstore if the input value is 1866 // later than DestVT. 1867 SDValue Store; 1868 1869 if (SrcSize > SlotSize) 1870 Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, 1871 PtrInfo, SlotVT, false, false, SrcAlign); 1872 else { 1873 assert(SrcSize == SlotSize && "Invalid store"); 1874 Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, 1875 PtrInfo, false, false, SrcAlign); 1876 } 1877 1878 // Result is a load from the stack slot. 
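  // If SlotVT and DestVT have the same size this is a plain load; otherwise an
  // extending load widens the slot value to DestVT.  The BITCAST expansion,
  // for instance, uses SlotVT == DestVT, so an f64-to-i64 bitcast becomes a
  // store of the f64 followed by an i64 load from the same slot.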
1879 if (SlotSize == DestSize) 1880 return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo, 1881 false, false, DestAlign); 1882 1883 assert(SlotSize < DestSize && "Unknown extension!"); 1884 return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, 1885 PtrInfo, SlotVT, false, false, DestAlign); 1886} 1887 1888SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) { 1889 DebugLoc dl = Node->getDebugLoc(); 1890 // Create a vector sized/aligned stack slot, store the value to element #0, 1891 // then load the whole vector back out. 1892 SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0)); 1893 1894 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr); 1895 int SPFI = StackPtrFI->getIndex(); 1896 1897 SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0), 1898 StackPtr, 1899 MachinePointerInfo::getFixedStack(SPFI), 1900 Node->getValueType(0).getVectorElementType(), 1901 false, false, 0); 1902 return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr, 1903 MachinePointerInfo::getFixedStack(SPFI), 1904 false, false, 0); 1905} 1906 1907 1908/// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't 1909/// support the operation, but do support the resultant vector type. 1910SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { 1911 unsigned NumElems = Node->getNumOperands(); 1912 SDValue Value1, Value2; 1913 DebugLoc dl = Node->getDebugLoc(); 1914 EVT VT = Node->getValueType(0); 1915 EVT OpVT = Node->getOperand(0).getValueType(); 1916 EVT EltVT = VT.getVectorElementType(); 1917 1918 // If the only non-undef value is the low element, turn this into a 1919 // SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X. 1920 bool isOnlyLowElement = true; 1921 bool MoreThanTwoValues = false; 1922 bool isConstant = true; 1923 for (unsigned i = 0; i < NumElems; ++i) { 1924 SDValue V = Node->getOperand(i); 1925 if (V.getOpcode() == ISD::UNDEF) 1926 continue; 1927 if (i > 0) 1928 isOnlyLowElement = false; 1929 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 1930 isConstant = false; 1931 1932 if (!Value1.getNode()) { 1933 Value1 = V; 1934 } else if (!Value2.getNode()) { 1935 if (V != Value1) 1936 Value2 = V; 1937 } else if (V != Value1 && V != Value2) { 1938 MoreThanTwoValues = true; 1939 } 1940 } 1941 1942 if (!Value1.getNode()) 1943 return DAG.getUNDEF(VT); 1944 1945 if (isOnlyLowElement) 1946 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0)); 1947 1948 // If all elements are constants, create a load from the constant pool. 1949 if (isConstant) { 1950 std::vector<Constant*> CV; 1951 for (unsigned i = 0, e = NumElems; i != e; ++i) { 1952 if (ConstantFPSDNode *V = 1953 dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) { 1954 CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue())); 1955 } else if (ConstantSDNode *V = 1956 dyn_cast<ConstantSDNode>(Node->getOperand(i))) { 1957 if (OpVT==EltVT) 1958 CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue())); 1959 else { 1960 // If OpVT and EltVT don't match, EltVT is not legal and the 1961 // element values have been promoted/truncated earlier. Undo this; 1962 // we don't want a v16i8 to become a v16i32 for example. 
1963 const ConstantInt *CI = V->getConstantIntValue(); 1964 CV.push_back(ConstantInt::get(EltVT.getTypeForEVT(*DAG.getContext()), 1965 CI->getZExtValue())); 1966 } 1967 } else { 1968 assert(Node->getOperand(i).getOpcode() == ISD::UNDEF); 1969 const Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext()); 1970 CV.push_back(UndefValue::get(OpNTy)); 1971 } 1972 } 1973 Constant *CP = ConstantVector::get(CV); 1974 SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy()); 1975 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 1976 return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 1977 MachinePointerInfo::getConstantPool(), 1978 false, false, Alignment); 1979 } 1980 1981 if (!MoreThanTwoValues) { 1982 SmallVector<int, 8> ShuffleVec(NumElems, -1); 1983 for (unsigned i = 0; i < NumElems; ++i) { 1984 SDValue V = Node->getOperand(i); 1985 if (V.getOpcode() == ISD::UNDEF) 1986 continue; 1987 ShuffleVec[i] = V == Value1 ? 0 : NumElems; 1988 } 1989 if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) { 1990 // Get the splatted value into the low element of a vector register. 1991 SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1); 1992 SDValue Vec2; 1993 if (Value2.getNode()) 1994 Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2); 1995 else 1996 Vec2 = DAG.getUNDEF(VT); 1997 1998 // Return shuffle(LowValVec, undef, <0,0,0,0>) 1999 return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data()); 2000 } 2001 } 2002 2003 // Otherwise, we can't handle this case efficiently. 2004 return ExpandVectorBuildThroughStack(Node); 2005} 2006 2007// ExpandLibCall - Expand a node into a call to a libcall. If the result value 2008// does not fit into a register, return the lo part and set the hi part to the 2009// by-reg argument. If it does fit into a single register, return the result 2010// and leave the Hi part unset. 2011SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, 2012 bool isSigned) { 2013 // The input chain to this libcall is the entry node of the function. 2014 // Legalizing the call will automatically add the previous call to the 2015 // dependence. 2016 SDValue InChain = DAG.getEntryNode(); 2017 2018 TargetLowering::ArgListTy Args; 2019 TargetLowering::ArgListEntry Entry; 2020 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 2021 EVT ArgVT = Node->getOperand(i).getValueType(); 2022 const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2023 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 2024 Entry.isSExt = isSigned; 2025 Entry.isZExt = !isSigned; 2026 Args.push_back(Entry); 2027 } 2028 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2029 TLI.getPointerTy()); 2030 2031 // Splice the libcall in wherever FindInputOutputChains tells us to. 2032 const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 2033 2034 // isTailCall may be true since the callee does not reference caller stack 2035 // frame. Check if it's in the right position. 2036 bool isTailCall = isInTailCallPosition(DAG, Node, TLI); 2037 std::pair<SDValue, SDValue> CallInfo = 2038 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 2039 0, TLI.getLibcallCallingConv(LC), isTailCall, 2040 /*isReturnValueUsed=*/true, 2041 Callee, Args, DAG, Node->getDebugLoc()); 2042 2043 if (!CallInfo.second.getNode()) 2044 // It's a tailcall, return the chain (which is the DAG root). 2045 return DAG.getRoot(); 2046 2047 // Legalize the call sequence, starting with the chain. 
This will advance 2048 // the LastCALLSEQ to the legalized version of the CALLSEQ_END node that 2049 // was added by LowerCallTo (guaranteeing proper serialization of calls). 2050 LegalizeOp(CallInfo.second); 2051 return CallInfo.first; 2052} 2053 2054/// ExpandLibCall - Generate a libcall taking the given operands as arguments 2055/// and returning a result of type RetVT. 2056SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, 2057 const SDValue *Ops, unsigned NumOps, 2058 bool isSigned, DebugLoc dl) { 2059 TargetLowering::ArgListTy Args; 2060 Args.reserve(NumOps); 2061 2062 TargetLowering::ArgListEntry Entry; 2063 for (unsigned i = 0; i != NumOps; ++i) { 2064 Entry.Node = Ops[i]; 2065 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext()); 2066 Entry.isSExt = isSigned; 2067 Entry.isZExt = !isSigned; 2068 Args.push_back(Entry); 2069 } 2070 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2071 TLI.getPointerTy()); 2072 2073 const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 2074 std::pair<SDValue,SDValue> CallInfo = 2075 TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false, 2076 false, 0, TLI.getLibcallCallingConv(LC), false, 2077 /*isReturnValueUsed=*/true, 2078 Callee, Args, DAG, dl); 2079 2080 // Legalize the call sequence, starting with the chain. This will advance 2081 // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that 2082 // was added by LowerCallTo (guaranteeing proper serialization of calls). 2083 LegalizeOp(CallInfo.second); 2084 2085 return CallInfo.first; 2086} 2087 2088// ExpandChainLibCall - Expand a node into a call to a libcall. Similar to 2089// ExpandLibCall except that the first operand is the in-chain. 2090std::pair<SDValue, SDValue> 2091SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC, 2092 SDNode *Node, 2093 bool isSigned) { 2094 SDValue InChain = Node->getOperand(0); 2095 2096 TargetLowering::ArgListTy Args; 2097 TargetLowering::ArgListEntry Entry; 2098 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) { 2099 EVT ArgVT = Node->getOperand(i).getValueType(); 2100 const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2101 Entry.Node = Node->getOperand(i); 2102 Entry.Ty = ArgTy; 2103 Entry.isSExt = isSigned; 2104 Entry.isZExt = !isSigned; 2105 Args.push_back(Entry); 2106 } 2107 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2108 TLI.getPointerTy()); 2109 2110 // Splice the libcall in wherever FindInputOutputChains tells us to. 2111 const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 2112 std::pair<SDValue, SDValue> CallInfo = 2113 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 2114 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false, 2115 /*isReturnValueUsed=*/true, 2116 Callee, Args, DAG, Node->getDebugLoc()); 2117 2118 // Legalize the call sequence, starting with the chain. This will advance 2119 // the LastCALLSEQ to the legalized version of the CALLSEQ_END node that 2120 // was added by LowerCallTo (guaranteeing proper serialization of calls). 
2121 LegalizeOp(CallInfo.second); 2122 return CallInfo; 2123} 2124 2125SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node, 2126 RTLIB::Libcall Call_F32, 2127 RTLIB::Libcall Call_F64, 2128 RTLIB::Libcall Call_F80, 2129 RTLIB::Libcall Call_PPCF128) { 2130 RTLIB::Libcall LC; 2131 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 2132 default: assert(0 && "Unexpected request for libcall!"); 2133 case MVT::f32: LC = Call_F32; break; 2134 case MVT::f64: LC = Call_F64; break; 2135 case MVT::f80: LC = Call_F80; break; 2136 case MVT::ppcf128: LC = Call_PPCF128; break; 2137 } 2138 return ExpandLibCall(LC, Node, false); 2139} 2140 2141SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned, 2142 RTLIB::Libcall Call_I8, 2143 RTLIB::Libcall Call_I16, 2144 RTLIB::Libcall Call_I32, 2145 RTLIB::Libcall Call_I64, 2146 RTLIB::Libcall Call_I128) { 2147 RTLIB::Libcall LC; 2148 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 2149 default: assert(0 && "Unexpected request for libcall!"); 2150 case MVT::i8: LC = Call_I8; break; 2151 case MVT::i16: LC = Call_I16; break; 2152 case MVT::i32: LC = Call_I32; break; 2153 case MVT::i64: LC = Call_I64; break; 2154 case MVT::i128: LC = Call_I128; break; 2155 } 2156 return ExpandLibCall(LC, Node, isSigned); 2157} 2158 2159/// isDivRemLibcallAvailable - Return true if divmod libcall is available. 2160static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned, 2161 const TargetLowering &TLI) { 2162 RTLIB::Libcall LC; 2163 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 2164 default: assert(0 && "Unexpected request for libcall!"); 2165 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 2166 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 2167 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 2168 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 2169 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 2170 } 2171 2172 return TLI.getLibcallName(LC) != 0; 2173} 2174 2175/// UseDivRem - Only issue divrem libcall if both quotient and remainder are 2176/// needed. 2177static bool UseDivRem(SDNode *Node, bool isSigned, bool isDIV) { 2178 unsigned OtherOpcode = 0; 2179 if (isSigned) 2180 OtherOpcode = isDIV ? ISD::SREM : ISD::SDIV; 2181 else 2182 OtherOpcode = isDIV ? ISD::UREM : ISD::UDIV; 2183 2184 SDValue Op0 = Node->getOperand(0); 2185 SDValue Op1 = Node->getOperand(1); 2186 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(), 2187 UE = Op0.getNode()->use_end(); UI != UE; ++UI) { 2188 SDNode *User = *UI; 2189 if (User == Node) 2190 continue; 2191 if (User->getOpcode() == OtherOpcode && 2192 User->getOperand(0) == Op0 && 2193 User->getOperand(1) == Op1) 2194 return true; 2195 } 2196 return false; 2197} 2198 2199/// ExpandDivRemLibCall - Issue libcalls to __{u}divmod to compute div / rem 2200/// pairs. 2201void 2202SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node, 2203 SmallVectorImpl<SDValue> &Results) { 2204 unsigned Opcode = Node->getOpcode(); 2205 bool isSigned = Opcode == ISD::SDIVREM; 2206 2207 RTLIB::Libcall LC; 2208 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 2209 default: assert(0 && "Unexpected request for libcall!"); 2210 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 2211 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 2212 case MVT::i32: LC= isSigned ? 
RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 2213 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 2214 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 2215 } 2216 2217 // The input chain to this libcall is the entry node of the function. 2218 // Legalizing the call will automatically add the previous call to the 2219 // dependence. 2220 SDValue InChain = DAG.getEntryNode(); 2221 2222 EVT RetVT = Node->getValueType(0); 2223 const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 2224 2225 TargetLowering::ArgListTy Args; 2226 TargetLowering::ArgListEntry Entry; 2227 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 2228 EVT ArgVT = Node->getOperand(i).getValueType(); 2229 const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2230 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 2231 Entry.isSExt = isSigned; 2232 Entry.isZExt = !isSigned; 2233 Args.push_back(Entry); 2234 } 2235 2236 // Also pass the return address of the remainder. 2237 SDValue FIPtr = DAG.CreateStackTemporary(RetVT); 2238 Entry.Node = FIPtr; 2239 Entry.Ty = RetTy->getPointerTo(); 2240 Entry.isSExt = isSigned; 2241 Entry.isZExt = !isSigned; 2242 Args.push_back(Entry); 2243 2244 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2245 TLI.getPointerTy()); 2246 2247 // Splice the libcall in wherever FindInputOutputChains tells us to. 2248 DebugLoc dl = Node->getDebugLoc(); 2249 std::pair<SDValue, SDValue> CallInfo = 2250 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 2251 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false, 2252 /*isReturnValueUsed=*/true, Callee, Args, DAG, dl); 2253 2254 // Legalize the call sequence, starting with the chain. This will advance 2255 // the LastCALLSEQ to the legalized version of the CALLSEQ_END node that 2256 // was added by LowerCallTo (guaranteeing proper serialization of calls). 2257 LegalizeOp(CallInfo.second); 2258 2259 // Remainder is loaded back from the stack frame. 2260 SDValue Rem = DAG.getLoad(RetVT, dl, getLastCALLSEQ(), FIPtr, 2261 MachinePointerInfo(), false, false, 0); 2262 Results.push_back(CallInfo.first); 2263 Results.push_back(Rem); 2264} 2265 2266/// ExpandLegalINT_TO_FP - This function is responsible for legalizing a 2267/// INT_TO_FP operation of the specified operand when the target requests that 2268/// we expand it. At this point, we know that the result and operand types are 2269/// legal for the target. 2270SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, 2271 SDValue Op0, 2272 EVT DestVT, 2273 DebugLoc dl) { 2274 if (Op0.getValueType() == MVT::i32) { 2275 // simple 32-bit [signed|unsigned] integer to float/double expansion 2276 2277 // Get the stack frame index of a 8 byte buffer. 
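    // The two word stores below place X in the low word and 0x43300000 in the
    // high word of an f64 stack slot, i.e. the double 2^52 + (uint32_t)X (the
    // sign bit is pre-flipped first in the signed case).  Subtracting the
    // matching bias (2^52, or 2^52 + 2^31 when signed) yields the integer
    // value exactly: unsigned X = 5 gives 2^52 + 5, minus 2^52 is 5.0.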
2278 SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64); 2279 2280 // word offset constant for Hi/Lo address computation 2281 SDValue WordOff = DAG.getConstant(sizeof(int), TLI.getPointerTy()); 2282 // set up Hi and Lo (into buffer) address based on endian 2283 SDValue Hi = StackSlot; 2284 SDValue Lo = DAG.getNode(ISD::ADD, dl, 2285 TLI.getPointerTy(), StackSlot, WordOff); 2286 if (TLI.isLittleEndian()) 2287 std::swap(Hi, Lo); 2288 2289 // if signed map to unsigned space 2290 SDValue Op0Mapped; 2291 if (isSigned) { 2292 // constant used to invert sign bit (signed to unsigned mapping) 2293 SDValue SignBit = DAG.getConstant(0x80000000u, MVT::i32); 2294 Op0Mapped = DAG.getNode(ISD::XOR, dl, MVT::i32, Op0, SignBit); 2295 } else { 2296 Op0Mapped = Op0; 2297 } 2298 // store the lo of the constructed double - based on integer input 2299 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, 2300 Op0Mapped, Lo, MachinePointerInfo(), 2301 false, false, 0); 2302 // initial hi portion of constructed double 2303 SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32); 2304 // store the hi of the constructed double - biased exponent 2305 SDValue Store2 = DAG.getStore(Store1, dl, InitialHi, Hi, 2306 MachinePointerInfo(), 2307 false, false, 0); 2308 // load the constructed double 2309 SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot, 2310 MachinePointerInfo(), false, false, 0); 2311 // FP constant to bias correct the final result 2312 SDValue Bias = DAG.getConstantFP(isSigned ? 2313 BitsToDouble(0x4330000080000000ULL) : 2314 BitsToDouble(0x4330000000000000ULL), 2315 MVT::f64); 2316 // subtract the bias 2317 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias); 2318 // final result 2319 SDValue Result; 2320 // handle final rounding 2321 if (DestVT == MVT::f64) { 2322 // do nothing 2323 Result = Sub; 2324 } else if (DestVT.bitsLT(MVT::f64)) { 2325 Result = DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 2326 DAG.getIntPtrConstant(0)); 2327 } else if (DestVT.bitsGT(MVT::f64)) { 2328 Result = DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 2329 } 2330 return Result; 2331 } 2332 assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet"); 2333 // Code below here assumes !isSigned without checking again. 2334 2335 // Implementation of unsigned i64 to f64 following the algorithm in 2336 // __floatundidf in compiler_rt. This implementation has the advantage 2337 // of performing rounding correctly, both in the default rounding mode 2338 // and in all alternate rounding modes. 2339 // TODO: Generalize this for use with other types. 
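  // In the i64 -> f64 path below, the low 32 bits are OR'd into the mantissa
  // of 2^52 (LoFlt == 2^52 + lo) and the high 32 bits into the mantissa of
  // 2^84 (HiFlt == 2^84 + hi * 2^32).  Subtracting 2^84 + 2^52 from HiFlt is
  // exact, so the only rounding happens in the final FADD, which is what makes
  // the result correct in every rounding mode.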
2340 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f64) { 2341 SDValue TwoP52 = 2342 DAG.getConstant(UINT64_C(0x4330000000000000), MVT::i64); 2343 SDValue TwoP84PlusTwoP52 = 2344 DAG.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), MVT::f64); 2345 SDValue TwoP84 = 2346 DAG.getConstant(UINT64_C(0x4530000000000000), MVT::i64); 2347 2348 SDValue Lo = DAG.getZeroExtendInReg(Op0, dl, MVT::i32); 2349 SDValue Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, 2350 DAG.getConstant(32, MVT::i64)); 2351 SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52); 2352 SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84); 2353 SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr); 2354 SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr); 2355 SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt, 2356 TwoP84PlusTwoP52); 2357 return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub); 2358 } 2359 2360 // Implementation of unsigned i64 to f32. 2361 // TODO: Generalize this for use with other types. 2362 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) { 2363 // For unsigned conversions, convert them to signed conversions using the 2364 // algorithm from the x86_64 __floatundidf in compiler_rt. 2365 if (!isSigned) { 2366 SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0); 2367 2368 SDValue ShiftConst = 2369 DAG.getConstant(1, TLI.getShiftAmountTy(Op0.getValueType())); 2370 SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst); 2371 SDValue AndConst = DAG.getConstant(1, MVT::i64); 2372 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst); 2373 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr); 2374 2375 SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or); 2376 SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt); 2377 2378 // TODO: This really should be implemented using a branch rather than a 2379 // select. We happen to get lucky and machinesink does the right 2380 // thing most of the time. This would be a good candidate for a 2381 //pseudo-op, or, even better, for whole-function isel. 2382 SDValue SignBitTest = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2383 Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT); 2384 return DAG.getNode(ISD::SELECT, dl, MVT::f32, SignBitTest, Slow, Fast); 2385 } 2386 2387 // Otherwise, implement the fully general conversion. 
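    // The masking below implements a "sticky bit": for inputs >= 2^53, where
    // the intermediate f64 cannot hold every i64 exactly, any nonzero low
    // 11 bits are collapsed into bit 11 before the value is split into 32-bit
    // halves, so the final FP_ROUND to f32 rounds the same way a direct
    // i64 -> f32 conversion would (no double rounding).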
2388 2389 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2390 DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64)); 2391 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, 2392 DAG.getConstant(UINT64_C(0x800), MVT::i64)); 2393 SDValue And2 = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2394 DAG.getConstant(UINT64_C(0x7ff), MVT::i64)); 2395 SDValue Ne = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2396 And2, DAG.getConstant(UINT64_C(0), MVT::i64), ISD::SETNE); 2397 SDValue Sel = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ne, Or, Op0); 2398 SDValue Ge = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2399 Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64), 2400 ISD::SETUGE); 2401 SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0); 2402 EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType()); 2403 2404 SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2, 2405 DAG.getConstant(32, SHVT)); 2406 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh); 2407 SDValue Fcvt = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Trunc); 2408 SDValue TwoP32 = 2409 DAG.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), MVT::f64); 2410 SDValue Fmul = DAG.getNode(ISD::FMUL, dl, MVT::f64, TwoP32, Fcvt); 2411 SDValue Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sel2); 2412 SDValue Fcvt2 = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Lo); 2413 SDValue Fadd = DAG.getNode(ISD::FADD, dl, MVT::f64, Fmul, Fcvt2); 2414 return DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Fadd, 2415 DAG.getIntPtrConstant(0)); 2416 } 2417 2418 SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0); 2419 2420 SDValue SignSet = DAG.getSetCC(dl, TLI.getSetCCResultType(Op0.getValueType()), 2421 Op0, DAG.getConstant(0, Op0.getValueType()), 2422 ISD::SETLT); 2423 SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4); 2424 SDValue CstOffset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), 2425 SignSet, Four, Zero); 2426 2427 // If the sign bit of the integer is set, the large number will be treated 2428 // as a negative number. To counteract this, the dynamic code adds an 2429 // offset depending on the data type. 
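  // The i64 constant built below packs two f32 bit patterns so that offset 0
  // holds 0.0f and offset 4 holds the fudge factor 2^N (N = input width);
  // FF <<= 32 arranges the halves for little-endian layouts.  CstOffset
  // selects offset 4 exactly when the sign bit of the input was set, i.e.
  // when the SINT_TO_FP above undershot by 2^N.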
2430 uint64_t FF; 2431 switch (Op0.getValueType().getSimpleVT().SimpleTy) { 2432 default: assert(0 && "Unsupported integer type!"); 2433 case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float) 2434 case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float) 2435 case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float) 2436 case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float) 2437 } 2438 if (TLI.isLittleEndian()) FF <<= 32; 2439 Constant *FudgeFactor = ConstantInt::get( 2440 Type::getInt64Ty(*DAG.getContext()), FF); 2441 2442 SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy()); 2443 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 2444 CPIdx = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), CPIdx, CstOffset); 2445 Alignment = std::min(Alignment, 4u); 2446 SDValue FudgeInReg; 2447 if (DestVT == MVT::f32) 2448 FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx, 2449 MachinePointerInfo::getConstantPool(), 2450 false, false, Alignment); 2451 else { 2452 FudgeInReg = 2453 LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, 2454 DAG.getEntryNode(), CPIdx, 2455 MachinePointerInfo::getConstantPool(), 2456 MVT::f32, false, false, Alignment)); 2457 } 2458 2459 return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg); 2460} 2461 2462/// PromoteLegalINT_TO_FP - This function is responsible for legalizing a 2463/// *INT_TO_FP operation of the specified operand when the target requests that 2464/// we promote it. At this point, we know that the result and operand types are 2465/// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP 2466/// operation that takes a larger input. 2467SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp, 2468 EVT DestVT, 2469 bool isSigned, 2470 DebugLoc dl) { 2471 // First step, figure out the appropriate *INT_TO_FP operation to use. 2472 EVT NewInTy = LegalOp.getValueType(); 2473 2474 unsigned OpToUse = 0; 2475 2476 // Scan for the appropriate larger type to use. 2477 while (1) { 2478 NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1); 2479 assert(NewInTy.isInteger() && "Ran out of possibilities!"); 2480 2481 // If the target supports SINT_TO_FP of this type, use it. 2482 if (TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, NewInTy)) { 2483 OpToUse = ISD::SINT_TO_FP; 2484 break; 2485 } 2486 if (isSigned) continue; 2487 2488 // If the target supports UINT_TO_FP of this type, use it. 2489 if (TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, NewInTy)) { 2490 OpToUse = ISD::UINT_TO_FP; 2491 break; 2492 } 2493 2494 // Otherwise, try a larger type. 2495 } 2496 2497 // Okay, we found the operation and type to use. Zero extend our input to the 2498 // desired type then run the operation on it. 2499 return DAG.getNode(OpToUse, dl, DestVT, 2500 DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, 2501 dl, NewInTy, LegalOp)); 2502} 2503 2504/// PromoteLegalFP_TO_INT - This function is responsible for legalizing a 2505/// FP_TO_*INT operation of the specified operand when the target requests that 2506/// we promote it. At this point, we know that the result and operand types are 2507/// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT 2508/// operation that returns a larger result. 2509SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp, 2510 EVT DestVT, 2511 bool isSigned, 2512 DebugLoc dl) { 2513 // First step, figure out the appropriate FP_TO*INT operation to use. 
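  // The loop below walks successively larger integer result types until one
  // has a legal (or custom) FP_TO_SINT or FP_TO_UINT, then truncates back to
  // DestVT.  For example, an illegal FP_TO_UINT to i16 might become a
  // FP_TO_SINT to i32 followed by a TRUNCATE to i16.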
2514 EVT NewOutTy = DestVT; 2515 2516 unsigned OpToUse = 0; 2517 2518 // Scan for the appropriate larger type to use. 2519 while (1) { 2520 NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1); 2521 assert(NewOutTy.isInteger() && "Ran out of possibilities!"); 2522 2523 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) { 2524 OpToUse = ISD::FP_TO_SINT; 2525 break; 2526 } 2527 2528 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) { 2529 OpToUse = ISD::FP_TO_UINT; 2530 break; 2531 } 2532 2533 // Otherwise, try a larger type. 2534 } 2535 2536 2537 // Okay, we found the operation and type to use. 2538 SDValue Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp); 2539 2540 // Truncate the result of the extended FP_TO_*INT operation to the desired 2541 // size. 2542 return DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation); 2543} 2544 2545/// ExpandBSWAP - Open code the operations for BSWAP of the specified operation. 2546/// 2547SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) { 2548 EVT VT = Op.getValueType(); 2549 EVT SHVT = TLI.getShiftAmountTy(VT); 2550 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; 2551 switch (VT.getSimpleVT().SimpleTy) { 2552 default: assert(0 && "Unhandled Expand type in BSWAP!"); 2553 case MVT::i16: 2554 Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2555 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2556 return DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2557 case MVT::i32: 2558 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2559 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2560 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2561 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2562 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(0xFF0000, VT)); 2563 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, VT)); 2564 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2565 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2566 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 2567 case MVT::i64: 2568 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2569 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2570 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2571 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2572 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2573 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2574 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2575 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2576 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, DAG.getConstant(255ULL<<48, VT)); 2577 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, DAG.getConstant(255ULL<<40, VT)); 2578 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, DAG.getConstant(255ULL<<32, VT)); 2579 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, DAG.getConstant(255ULL<<24, VT)); 2580 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(255ULL<<16, VT)); 2581 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(255ULL<<8 , VT)); 2582 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7); 2583 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5); 2584 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2585 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2586 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6); 2587 Tmp4 = DAG.getNode(ISD::OR, dl, VT, 
Tmp4, Tmp2); 2588 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4); 2589 } 2590} 2591 2592/// SplatByte - Distribute ByteVal over NumBits bits. 2593// FIXME: Move this helper to a common place. 2594static APInt SplatByte(unsigned NumBits, uint8_t ByteVal) { 2595 APInt Val = APInt(NumBits, ByteVal); 2596 unsigned Shift = 8; 2597 for (unsigned i = NumBits; i > 8; i >>= 1) { 2598 Val = (Val << Shift) | Val; 2599 Shift <<= 1; 2600 } 2601 return Val; 2602} 2603 2604/// ExpandBitCount - Expand the specified bitcount instruction into operations. 2605/// 2606SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op, 2607 DebugLoc dl) { 2608 switch (Opc) { 2609 default: assert(0 && "Cannot expand this yet!"); 2610 case ISD::CTPOP: { 2611 EVT VT = Op.getValueType(); 2612 EVT ShVT = TLI.getShiftAmountTy(VT); 2613 unsigned Len = VT.getSizeInBits(); 2614 2615 assert(VT.isInteger() && Len <= 128 && Len % 8 == 0 && 2616 "CTPOP not implemented for this type."); 2617 2618 // This is the "best" algorithm from 2619 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 2620 2621 SDValue Mask55 = DAG.getConstant(SplatByte(Len, 0x55), VT); 2622 SDValue Mask33 = DAG.getConstant(SplatByte(Len, 0x33), VT); 2623 SDValue Mask0F = DAG.getConstant(SplatByte(Len, 0x0F), VT); 2624 SDValue Mask01 = DAG.getConstant(SplatByte(Len, 0x01), VT); 2625 2626 // v = v - ((v >> 1) & 0x55555555...) 2627 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 2628 DAG.getNode(ISD::AND, dl, VT, 2629 DAG.getNode(ISD::SRL, dl, VT, Op, 2630 DAG.getConstant(1, ShVT)), 2631 Mask55)); 2632 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 2633 Op = DAG.getNode(ISD::ADD, dl, VT, 2634 DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 2635 DAG.getNode(ISD::AND, dl, VT, 2636 DAG.getNode(ISD::SRL, dl, VT, Op, 2637 DAG.getConstant(2, ShVT)), 2638 Mask33)); 2639 // v = (v + (v >> 4)) & 0x0F0F0F0F... 2640 Op = DAG.getNode(ISD::AND, dl, VT, 2641 DAG.getNode(ISD::ADD, dl, VT, Op, 2642 DAG.getNode(ISD::SRL, dl, VT, Op, 2643 DAG.getConstant(4, ShVT))), 2644 Mask0F); 2645 // v = (v * 0x01010101...) >> (Len - 8) 2646 Op = DAG.getNode(ISD::SRL, dl, VT, 2647 DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 2648 DAG.getConstant(Len - 8, ShVT)); 2649 2650 return Op; 2651 } 2652 case ISD::CTLZ: { 2653 // for now, we do this: 2654 // x = x | (x >> 1); 2655 // x = x | (x >> 2); 2656 // ... 2657 // x = x | (x >>16); 2658 // x = x | (x >>32); // for 64-bit input 2659 // return popcount(~x); 2660 // 2661 // but see also: http://www.hackersdelight.org/HDcode/nlz.cc 2662 EVT VT = Op.getValueType(); 2663 EVT ShVT = TLI.getShiftAmountTy(VT); 2664 unsigned len = VT.getSizeInBits(); 2665 for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { 2666 SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT); 2667 Op = DAG.getNode(ISD::OR, dl, VT, Op, 2668 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3)); 2669 } 2670 Op = DAG.getNOT(dl, Op, VT); 2671 return DAG.getNode(ISD::CTPOP, dl, VT, Op); 2672 } 2673 case ISD::CTTZ: { 2674 // for now, we use: { return popcount(~x & (x - 1)); } 2675 // unless the target has ctlz but not ctpop, in which case we use: 2676 // { return 32 - nlz(~x & (x-1)); } 2677 // see also http://www.hackersdelight.org/HDcode/ntz.cc 2678 EVT VT = Op.getValueType(); 2679 SDValue Tmp3 = DAG.getNode(ISD::AND, dl, VT, 2680 DAG.getNOT(dl, Op, VT), 2681 DAG.getNode(ISD::SUB, dl, VT, Op, 2682 DAG.getConstant(1, VT))); 2683 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 
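    // Illustrative example: for x = 0b01000, ~x & (x - 1) = 0b00111, so
    // CTPOP gives 3; the CTLZ form computes the same value as
    // 32 - ctlz(0b00111) = 32 - 29 = 3 for a 32-bit type.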
2684 if (!TLI.isOperationLegalOrCustom(ISD::CTPOP, VT) && 2685 TLI.isOperationLegalOrCustom(ISD::CTLZ, VT)) 2686 return DAG.getNode(ISD::SUB, dl, VT, 2687 DAG.getConstant(VT.getSizeInBits(), VT), 2688 DAG.getNode(ISD::CTLZ, dl, VT, Tmp3)); 2689 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp3); 2690 } 2691 } 2692} 2693 2694std::pair <SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) { 2695 unsigned Opc = Node->getOpcode(); 2696 MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT(); 2697 RTLIB::Libcall LC; 2698 2699 switch (Opc) { 2700 default: 2701 llvm_unreachable("Unhandled atomic intrinsic Expand!"); 2702 break; 2703 case ISD::ATOMIC_SWAP: 2704 switch (VT.SimpleTy) { 2705 default: llvm_unreachable("Unexpected value type for atomic!"); 2706 case MVT::i8: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_1; break; 2707 case MVT::i16: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_2; break; 2708 case MVT::i32: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_4; break; 2709 case MVT::i64: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_8; break; 2710 } 2711 break; 2712 case ISD::ATOMIC_CMP_SWAP: 2713 switch (VT.SimpleTy) { 2714 default: llvm_unreachable("Unexpected value type for atomic!"); 2715 case MVT::i8: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1; break; 2716 case MVT::i16: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2; break; 2717 case MVT::i32: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4; break; 2718 case MVT::i64: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8; break; 2719 } 2720 break; 2721 case ISD::ATOMIC_LOAD_ADD: 2722 switch (VT.SimpleTy) { 2723 default: llvm_unreachable("Unexpected value type for atomic!"); 2724 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_ADD_1; break; 2725 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_ADD_2; break; 2726 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_ADD_4; break; 2727 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_ADD_8; break; 2728 } 2729 break; 2730 case ISD::ATOMIC_LOAD_SUB: 2731 switch (VT.SimpleTy) { 2732 default: llvm_unreachable("Unexpected value type for atomic!"); 2733 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_SUB_1; break; 2734 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_SUB_2; break; 2735 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_SUB_4; break; 2736 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_SUB_8; break; 2737 } 2738 break; 2739 case ISD::ATOMIC_LOAD_AND: 2740 switch (VT.SimpleTy) { 2741 default: llvm_unreachable("Unexpected value type for atomic!"); 2742 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_AND_1; break; 2743 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_AND_2; break; 2744 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_AND_4; break; 2745 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_AND_8; break; 2746 } 2747 break; 2748 case ISD::ATOMIC_LOAD_OR: 2749 switch (VT.SimpleTy) { 2750 default: llvm_unreachable("Unexpected value type for atomic!"); 2751 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_OR_1; break; 2752 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_OR_2; break; 2753 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_OR_4; break; 2754 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_OR_8; break; 2755 } 2756 break; 2757 case ISD::ATOMIC_LOAD_XOR: 2758 switch (VT.SimpleTy) { 2759 default: llvm_unreachable("Unexpected value type for atomic!"); 2760 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_XOR_1; break; 2761 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_XOR_2; break; 2762 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_XOR_4; break; 2763 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_XOR_8; break; 2764 } 2765 break; 2766 case ISD::ATOMIC_LOAD_NAND: 2767 switch (VT.SimpleTy) { 2768 default: llvm_unreachable("Unexpected value type for atomic!"); 2769 
case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_NAND_1; break; 2770 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_NAND_2; break; 2771 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_NAND_4; break; 2772 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_NAND_8; break; 2773 } 2774 break; 2775 } 2776 2777 return ExpandChainLibCall(LC, Node, false); 2778} 2779 2780void SelectionDAGLegalize::ExpandNode(SDNode *Node, 2781 SmallVectorImpl<SDValue> &Results) { 2782 DebugLoc dl = Node->getDebugLoc(); 2783 SDValue Tmp1, Tmp2, Tmp3, Tmp4; 2784 switch (Node->getOpcode()) { 2785 case ISD::CTPOP: 2786 case ISD::CTLZ: 2787 case ISD::CTTZ: 2788 Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl); 2789 Results.push_back(Tmp1); 2790 break; 2791 case ISD::BSWAP: 2792 Results.push_back(ExpandBSWAP(Node->getOperand(0), dl)); 2793 break; 2794 case ISD::FRAMEADDR: 2795 case ISD::RETURNADDR: 2796 case ISD::FRAME_TO_ARGS_OFFSET: 2797 Results.push_back(DAG.getConstant(0, Node->getValueType(0))); 2798 break; 2799 case ISD::FLT_ROUNDS_: 2800 Results.push_back(DAG.getConstant(1, Node->getValueType(0))); 2801 break; 2802 case ISD::EH_RETURN: 2803 case ISD::EH_LABEL: 2804 case ISD::PREFETCH: 2805 case ISD::VAEND: 2806 case ISD::EH_SJLJ_LONGJMP: 2807 case ISD::EH_SJLJ_DISPATCHSETUP: 2808 // If the target didn't expand these, there's nothing to do, so just 2809 // preserve the chain and be done. 2810 Results.push_back(Node->getOperand(0)); 2811 break; 2812 case ISD::EH_SJLJ_SETJMP: 2813 // If the target didn't expand this, just return 'zero' and preserve the 2814 // chain. 2815 Results.push_back(DAG.getConstant(0, MVT::i32)); 2816 Results.push_back(Node->getOperand(0)); 2817 break; 2818 case ISD::MEMBARRIER: { 2819 // If the target didn't lower this, lower it to '__sync_synchronize()' call 2820 TargetLowering::ArgListTy Args; 2821 std::pair<SDValue, SDValue> CallResult = 2822 TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()), 2823 false, false, false, false, 0, CallingConv::C, 2824 /*isTailCall=*/false, 2825 /*isReturnValueUsed=*/true, 2826 DAG.getExternalSymbol("__sync_synchronize", 2827 TLI.getPointerTy()), 2828 Args, DAG, dl); 2829 Results.push_back(CallResult.second); 2830 break; 2831 } 2832 // By default, atomic intrinsics are marked Legal and lowered. Targets 2833 // which don't support them directly, however, may want libcalls, in which 2834 // case they mark them Expand, and we get here. 
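  // ExpandAtomic below picks the RTLIB::SYNC_* libcall matching the opcode and
  // memory width and issues it via ExpandChainLibCall; a 32-bit
  // ATOMIC_LOAD_ADD, for instance, becomes a call to the
  // __sync_fetch_and_add_4 runtime routine (assuming the target keeps the
  // default libcall name).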
2835 case ISD::ATOMIC_SWAP: 2836 case ISD::ATOMIC_LOAD_ADD: 2837 case ISD::ATOMIC_LOAD_SUB: 2838 case ISD::ATOMIC_LOAD_AND: 2839 case ISD::ATOMIC_LOAD_OR: 2840 case ISD::ATOMIC_LOAD_XOR: 2841 case ISD::ATOMIC_LOAD_NAND: 2842 case ISD::ATOMIC_LOAD_MIN: 2843 case ISD::ATOMIC_LOAD_MAX: 2844 case ISD::ATOMIC_LOAD_UMIN: 2845 case ISD::ATOMIC_LOAD_UMAX: 2846 case ISD::ATOMIC_CMP_SWAP: { 2847 std::pair<SDValue, SDValue> Tmp = ExpandAtomic(Node); 2848 Results.push_back(Tmp.first); 2849 Results.push_back(Tmp.second); 2850 break; 2851 } 2852 case ISD::DYNAMIC_STACKALLOC: 2853 ExpandDYNAMIC_STACKALLOC(Node, Results); 2854 break; 2855 case ISD::MERGE_VALUES: 2856 for (unsigned i = 0; i < Node->getNumValues(); i++) 2857 Results.push_back(Node->getOperand(i)); 2858 break; 2859 case ISD::UNDEF: { 2860 EVT VT = Node->getValueType(0); 2861 if (VT.isInteger()) 2862 Results.push_back(DAG.getConstant(0, VT)); 2863 else { 2864 assert(VT.isFloatingPoint() && "Unknown value type!"); 2865 Results.push_back(DAG.getConstantFP(0, VT)); 2866 } 2867 break; 2868 } 2869 case ISD::TRAP: { 2870 // If this operation is not supported, lower it to 'abort()' call 2871 TargetLowering::ArgListTy Args; 2872 std::pair<SDValue, SDValue> CallResult = 2873 TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()), 2874 false, false, false, false, 0, CallingConv::C, 2875 /*isTailCall=*/false, 2876 /*isReturnValueUsed=*/true, 2877 DAG.getExternalSymbol("abort", TLI.getPointerTy()), 2878 Args, DAG, dl); 2879 Results.push_back(CallResult.second); 2880 break; 2881 } 2882 case ISD::FP_ROUND: 2883 case ISD::BITCAST: 2884 Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0), 2885 Node->getValueType(0), dl); 2886 Results.push_back(Tmp1); 2887 break; 2888 case ISD::FP_EXTEND: 2889 Tmp1 = EmitStackConvert(Node->getOperand(0), 2890 Node->getOperand(0).getValueType(), 2891 Node->getValueType(0), dl); 2892 Results.push_back(Tmp1); 2893 break; 2894 case ISD::SIGN_EXTEND_INREG: { 2895 // NOTE: we could fall back on load/store here too for targets without 2896 // SAR. However, it is doubtful that any exist. 2897 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 2898 EVT VT = Node->getValueType(0); 2899 EVT ShiftAmountTy = TLI.getShiftAmountTy(VT); 2900 if (VT.isVector()) 2901 ShiftAmountTy = VT; 2902 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 2903 ExtraVT.getScalarType().getSizeInBits(); 2904 SDValue ShiftCst = DAG.getConstant(BitsDiff, ShiftAmountTy); 2905 Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0), 2906 Node->getOperand(0), ShiftCst); 2907 Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst); 2908 Results.push_back(Tmp1); 2909 break; 2910 } 2911 case ISD::FP_ROUND_INREG: { 2912 // The only way we can lower this is to turn it into a TRUNCSTORE, 2913 // EXTLOAD pair, targeting a temporary location (a stack slot). 2914 2915 // NOTE: there is a choice here between constantly creating new stack 2916 // slots and always reusing the same one. We currently always create 2917 // new ones, as reuse may inhibit scheduling. 
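    // Concretely, FP_ROUND_INREG of an f64 with ExtraVT = f32 becomes a
    // truncating f32 store to a fresh stack slot followed by an extending load
    // back to f64, rounding the value to single precision while keeping the
    // f64 type.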
    EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
    Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT,
                            Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    Tmp1 = ExpandLegalINT_TO_FP(Node->getOpcode() == ISD::SINT_TO_FP,
                                Node->getOperand(0), Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  case ISD::FP_TO_UINT: {
    SDValue True, False;
    EVT VT = Node->getOperand(0).getValueType();
    EVT NVT = Node->getValueType(0);
    APFloat apf(APInt::getNullValue(VT.getSizeInBits()));
    APInt x = APInt::getSignBit(NVT.getSizeInBits());
    (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven);
    Tmp1 = DAG.getConstantFP(apf, VT);
    Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(VT),
                        Node->getOperand(0),
                        Tmp1, ISD::SETLT);
    True = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, Node->getOperand(0));
    False = DAG.getNode(ISD::FP_TO_SINT, dl, NVT,
                        DAG.getNode(ISD::FSUB, dl, VT,
                                    Node->getOperand(0), Tmp1));
    False = DAG.getNode(ISD::XOR, dl, NVT, False,
                        DAG.getConstant(x, NVT));
    Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, True, False);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::VAARG: {
    const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
    EVT VT = Node->getValueType(0);
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    unsigned Align = Node->getConstantOperandVal(3);

    SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2,
                                     MachinePointerInfo(V), false, false, 0);
    SDValue VAList = VAListLoad;

    if (Align > TLI.getMinStackArgumentAlignment()) {
      assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");

      VAList = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
                           DAG.getConstant(Align - 1,
                                           TLI.getPointerTy()));

      VAList = DAG.getNode(ISD::AND, dl, TLI.getPointerTy(), VAList,
                           DAG.getConstant(-(int64_t)Align,
                                           TLI.getPointerTy()));
    }

    // Increment the pointer, VAList, to the next vaarg
    Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
                       DAG.getConstant(TLI.getTargetData()->
                         getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
                                        TLI.getPointerTy()));
    // Store the incremented VAList to the legalized pointer
    Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2,
                        MachinePointerInfo(V), false, false, 0);
    // Load the actual argument out of the pointer VAList
    Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
                                  false, false, 0));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::VACOPY: {
    // This defaults to loading a pointer from the input and storing it to the
    // output, returning the chain.
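    // In this default form a va_list is just a pointer, so the copy is a
    // single pointer-sized load from the source va_list slot followed by a
    // store of that pointer into the destination slot.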
    const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
    const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
    Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0),
                       Node->getOperand(2), MachinePointerInfo(VS),
                       false, false, 0);
    Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
                        MachinePointerInfo(VD), false, false, 0);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    if (Node->getOperand(0).getValueType().getVectorNumElements() == 1)
      // This must be an access of the only element.  Return it.
      Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0),
                         Node->getOperand(0));
    else
      Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0));
    Results.push_back(Tmp1);
    break;
  case ISD::EXTRACT_SUBVECTOR:
    Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0)));
    break;
  case ISD::INSERT_SUBVECTOR:
    Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0)));
    break;
  case ISD::CONCAT_VECTORS: {
    Results.push_back(ExpandVectorBuildThroughStack(Node));
    break;
  }
  case ISD::SCALAR_TO_VECTOR:
    Results.push_back(ExpandSCALAR_TO_VECTOR(Node));
    break;
  case ISD::INSERT_VECTOR_ELT:
    Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0),
                                              Node->getOperand(1),
                                              Node->getOperand(2), dl));
    break;
  case ISD::VECTOR_SHUFFLE: {
    SmallVector<int, 8> Mask;
    cast<ShuffleVectorSDNode>(Node)->getMask(Mask);

    EVT VT = Node->getValueType(0);
    EVT EltVT = VT.getVectorElementType();
    if (getTypeAction(EltVT) == Promote)
      EltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT);
    unsigned NumElems = VT.getVectorNumElements();
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i != NumElems; ++i) {
      if (Mask[i] < 0) {
        Ops.push_back(DAG.getUNDEF(EltVT));
        continue;
      }
      unsigned Idx = Mask[i];
      if (Idx < NumElems)
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  Node->getOperand(0),
                                  DAG.getIntPtrConstant(Idx)));
      else
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  Node->getOperand(1),
                                  DAG.getIntPtrConstant(Idx - NumElems)));
    }
    Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
    Results.push_back(Tmp1);
    break;
  }
  case ISD::EXTRACT_ELEMENT: {
    EVT OpTy = Node->getOperand(0).getValueType();
    if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) {
      // 1 -> Hi
      Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0),
                         DAG.getConstant(OpTy.getSizeInBits()/2,
                    TLI.getShiftAmountTy(Node->getOperand(0).getValueType())));
      Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1);
    } else {
      // 0 -> Lo
      Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0),
                         Node->getOperand(0));
    }
    Results.push_back(Tmp1);
    break;
  }
  case ISD::STACKSAVE:
    // Expand to CopyFromReg if the target set
    // StackPointerRegisterToSaveRestore.
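    // If the target never declared such a register, STACKSAVE simply yields
    // UNDEF (plus the incoming chain), and the STACKRESTORE case below
    // degenerates to a no-op that only forwards the chain.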
    if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
      Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP,
                                           Node->getValueType(0)));
      Results.push_back(Results[0].getValue(1));
    } else {
      Results.push_back(DAG.getUNDEF(Node->getValueType(0)));
      Results.push_back(Node->getOperand(0));
    }
    break;
  case ISD::STACKRESTORE:
    // Expand to CopyToReg if the target set
    // StackPointerRegisterToSaveRestore.
    if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
      Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP,
                                         Node->getOperand(1)));
    } else {
      Results.push_back(Node->getOperand(0));
    }
    break;
  case ISD::FCOPYSIGN:
    Results.push_back(ExpandFCOPYSIGN(Node));
    break;
  case ISD::FNEG:
    // Expand Y = FNEG(X) ->  Y = SUB -0.0, X
    Tmp1 = DAG.getConstantFP(-0.0, Node->getValueType(0));
    Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1,
                       Node->getOperand(0));
    Results.push_back(Tmp1);
    break;
  case ISD::FABS: {
    // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X).
    EVT VT = Node->getValueType(0);
    Tmp1 = Node->getOperand(0);
    Tmp2 = DAG.getConstantFP(0.0, VT);
    Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(Tmp1.getValueType()),
                        Tmp1, Tmp2, ISD::SETUGT);
    Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1);
    Tmp1 = DAG.getNode(ISD::SELECT, dl, VT, Tmp2, Tmp1, Tmp3);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::FSQRT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64,
                                      RTLIB::SQRT_F80, RTLIB::SQRT_PPCF128));
    break;
  case ISD::FSIN:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64,
                                      RTLIB::SIN_F80, RTLIB::SIN_PPCF128));
    break;
  case ISD::FCOS:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64,
                                      RTLIB::COS_F80, RTLIB::COS_PPCF128));
    break;
  case ISD::FLOG:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64,
                                      RTLIB::LOG_F80, RTLIB::LOG_PPCF128));
    break;
  case ISD::FLOG2:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64,
                                      RTLIB::LOG2_F80, RTLIB::LOG2_PPCF128));
    break;
  case ISD::FLOG10:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64,
                                      RTLIB::LOG10_F80, RTLIB::LOG10_PPCF128));
    break;
  case ISD::FEXP:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64,
                                      RTLIB::EXP_F80, RTLIB::EXP_PPCF128));
    break;
  case ISD::FEXP2:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64,
                                      RTLIB::EXP2_F80, RTLIB::EXP2_PPCF128));
    break;
  case ISD::FTRUNC:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64,
                                      RTLIB::TRUNC_F80, RTLIB::TRUNC_PPCF128));
    break;
  case ISD::FFLOOR:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64,
                                      RTLIB::FLOOR_F80, RTLIB::FLOOR_PPCF128));
    break;
  case ISD::FCEIL:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64,
                                      RTLIB::CEIL_F80, RTLIB::CEIL_PPCF128));
    break;
  case ISD::FRINT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64,
                                      RTLIB::RINT_F80, RTLIB::RINT_PPCF128));
    break;
  case ISD::FNEARBYINT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32,
                                      RTLIB::NEARBYINT_F64,
                                      RTLIB::NEARBYINT_F80,
                                      RTLIB::NEARBYINT_PPCF128));
    break;
  case ISD::FPOWI:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64,
                                      RTLIB::POWI_F80, RTLIB::POWI_PPCF128));
    break;
  case ISD::FPOW:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64,
                                      RTLIB::POW_F80, RTLIB::POW_PPCF128));
    break;
  case ISD::FDIV:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64,
                                      RTLIB::DIV_F80, RTLIB::DIV_PPCF128));
    break;
  case ISD::FREM:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64,
                                      RTLIB::REM_F80, RTLIB::REM_PPCF128));
    break;
  case ISD::FP16_TO_FP32:
    Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false));
    break;
  case ISD::FP32_TO_FP16:
    Results.push_back(ExpandLibCall(RTLIB::FPROUND_F32_F16, Node, false));
    break;
  case ISD::ConstantFP: {
    ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
    // Check to see if this FP immediate is already legal.
    // If this is a legal constant, turn it into a TargetConstantFP node.
    if (TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0)))
      Results.push_back(SDValue(Node, 0));
    else
      Results.push_back(ExpandConstantFP(CFP, true, DAG, TLI));
    break;
  }
  case ISD::EHSELECTION: {
    unsigned Reg = TLI.getExceptionSelectorRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(1), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::EXCEPTIONADDR: {
    unsigned Reg = TLI.getExceptionAddressRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::SUB: {
    EVT VT = Node->getValueType(0);
    assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
           TLI.isOperationLegalOrCustom(ISD::XOR, VT) &&
           "Don't know how to expand this subtraction!");
    // Expand a - b as a + (~b + 1), i.e. a + (-b) in two's complement.
    Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1),
               DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT));
    Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, VT));
    Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1));
    break;
  }
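  // For the remainder nodes below, prefer a combined divrem when the target
  // supports one (or a divrem libcall is usable); otherwise rewrite X % Y as
  // X - (X / Y) * Y, and only fall back to an SREM/UREM libcall last.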
  case ISD::UREM:
  case ISD::SREM: {
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    bool isSigned = Node->getOpcode() == ISD::SREM;
    unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
    unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
    Tmp2 = Node->getOperand(0);
    Tmp3 = Node->getOperand(1);
    if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
        (isDivRemLibcallAvailable(Node, isSigned, TLI) &&
         UseDivRem(Node, isSigned, false))) {
      Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1);
    } else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) {
      // X % Y -> X-X/Y*Y
      Tmp1 = DAG.getNode(DivOpc, dl, VT, Tmp2, Tmp3);
      Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Tmp3);
      Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp1);
    } else if (isSigned)
      Tmp1 = ExpandIntLibCall(Node, true,
                              RTLIB::SREM_I8,
                              RTLIB::SREM_I16, RTLIB::SREM_I32,
                              RTLIB::SREM_I64, RTLIB::SREM_I128);
    else
      Tmp1 = ExpandIntLibCall(Node, false,
                              RTLIB::UREM_I8,
                              RTLIB::UREM_I16, RTLIB::UREM_I32,
                              RTLIB::UREM_I64, RTLIB::UREM_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::UDIV:
  case ISD::SDIV: {
    bool isSigned = Node->getOpcode() == ISD::SDIV;
    unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
        (isDivRemLibcallAvailable(Node, isSigned, TLI) &&
         UseDivRem(Node, isSigned, true)))
      Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0),
                         Node->getOperand(1));
    else if (isSigned)
      Tmp1 = ExpandIntLibCall(Node, true,
                              RTLIB::SDIV_I8,
                              RTLIB::SDIV_I16, RTLIB::SDIV_I32,
                              RTLIB::SDIV_I64, RTLIB::SDIV_I128);
    else
      Tmp1 = ExpandIntLibCall(Node, false,
                              RTLIB::UDIV_I8,
                              RTLIB::UDIV_I16, RTLIB::UDIV_I32,
                              RTLIB::UDIV_I64, RTLIB::UDIV_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::MULHU:
  case ISD::MULHS: {
    unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI :
                                                              ISD::SMUL_LOHI;
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) &&
           "If this wasn't legal, it shouldn't have been created!");
    Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0),
                       Node->getOperand(1));
    Results.push_back(Tmp1.getValue(1));
    break;
  }
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    // Expand into divrem libcall
    ExpandDivRemLibCall(Node, Results);
    break;
  case ISD::MUL: {
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    // See if multiply or divide can be lowered using two-result operations.
    // We just need the low half of the multiply; try both the signed
    // and unsigned forms.  If the target supports both SMUL_LOHI and
    // UMUL_LOHI, form a preference by checking which forms of plain
    // MULH it supports.
    bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT);
    bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT);
    bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT);
    bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT);
    unsigned OpToUse = 0;
    if (HasSMUL_LOHI && !HasMULHS) {
      OpToUse = ISD::SMUL_LOHI;
    } else if (HasUMUL_LOHI && !HasMULHU) {
      OpToUse = ISD::UMUL_LOHI;
    } else if (HasSMUL_LOHI) {
      OpToUse = ISD::SMUL_LOHI;
    } else if (HasUMUL_LOHI) {
      OpToUse = ISD::UMUL_LOHI;
    }
    if (OpToUse) {
      Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0),
                                    Node->getOperand(1)));
      break;
    }
    Tmp1 = ExpandIntLibCall(Node, false,
                            RTLIB::MUL_I8,
                            RTLIB::MUL_I16, RTLIB::MUL_I32,
                            RTLIB::MUL_I64, RTLIB::MUL_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SADDO:
  case ISD::SSUBO: {
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
                              ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
                              LHS, RHS);
    Results.push_back(Sum);
    EVT OType = Node->getValueType(1);

    SDValue Zero = DAG.getConstant(0, LHS.getValueType());

    //   LHSSign -> LHS >= 0
    //   RHSSign -> RHS >= 0
    //   SumSign -> Sum >= 0
    //
    //   Add:
    //   Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
    //   Sub:
    //   Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
    //
    SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE);
    SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE);
    SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign,
                                      Node->getOpcode() == ISD::SADDO ?
                                      ISD::SETEQ : ISD::SETNE);

    SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE);
    SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE);

    SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE);
    Results.push_back(Cmp);
    break;
  }
  case ISD::UADDO:
  case ISD::USUBO: {
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::UADDO ?
                              ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
                              LHS, RHS);
    Results.push_back(Sum);
    // Unsigned overflow: for add, the result wrapped iff Sum < LHS; for sub,
    // it wrapped iff Sum > LHS.
    Results.push_back(DAG.getSetCC(dl, Node->getValueType(1), Sum, LHS,
                                   Node->getOpcode() == ISD::UADDO ?
                                   ISD::SETULT : ISD::SETUGT));
    break;
  }
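  // For multiply-with-overflow the plan is to obtain the full-width product
  // (via MULH*, *MUL_LOHI, a widened MUL, or a libcall) and then check that
  // the high half is consistent with a correctly extended low half.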
  case ISD::UMULO:
  case ISD::SMULO: {
    EVT VT = Node->getValueType(0);
    EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue BottomHalf;
    SDValue TopHalf;
    static const unsigned Ops[2][3] =
        { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
          { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
    bool isSigned = Node->getOpcode() == ISD::SMULO;
    if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
      BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
      TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
    } else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
      BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
                               RHS);
      TopHalf = BottomHalf.getValue(1);
    } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(),
                                                 VT.getSizeInBits() * 2))) {
      LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
      RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
      Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
      BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
                               DAG.getIntPtrConstant(0));
      TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
                            DAG.getIntPtrConstant(1));
    } else {
      // We can fall back to a libcall with an illegal type for the MUL if we
      // have a libcall big enough.
      // Also, we can fall back to a division in some cases, but that's a big
      // performance hit in the general case.
      RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
      if (WideVT == MVT::i16)
        LC = RTLIB::MUL_I16;
      else if (WideVT == MVT::i32)
        LC = RTLIB::MUL_I32;
      else if (WideVT == MVT::i64)
        LC = RTLIB::MUL_I64;
      else if (WideVT == MVT::i128)
        LC = RTLIB::MUL_I128;
      assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");

      // The high part is obtained by SRA'ing all but one of the bits of low
      // part.
      unsigned LoSize = VT.getSizeInBits();
      SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS,
                                  DAG.getConstant(LoSize-1,
                                                  TLI.getPointerTy()));
      SDValue HiRHS = DAG.getNode(ISD::SRA, dl, VT, RHS,
                                  DAG.getConstant(LoSize-1,
                                                  TLI.getPointerTy()));

      // Here we're passing the 2 arguments explicitly as 4 arguments that are
      // pre-lowered to the correct types. This all depends upon WideVT not
      // being a legal type for the architecture and thus has to be split to
      // two arguments.
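      // For instance, expanding an i64 [su]mulo when i128 is not legal picks
      // LC = MUL_I128 (commonly the __multi3 libcall), with each i128 operand
      // passed as the (lo, hi) pair of i64 values built above.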
      SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
      SDValue Ret = ExpandLibCall(LC, WideVT, Args, 4, isSigned, dl);
      BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
                               DAG.getIntPtrConstant(0));
      TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
                            DAG.getIntPtrConstant(1));
    }

    if (isSigned) {
      Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1,
                             TLI.getShiftAmountTy(BottomHalf.getValueType()));
      Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1);
      TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1,
                             ISD::SETNE);
    } else {
      TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf,
                             DAG.getConstant(0, VT), ISD::SETNE);
    }
    Results.push_back(BottomHalf);
    Results.push_back(TopHalf);
    break;
  }
  case ISD::BUILD_PAIR: {
    EVT PairTy = Node->getValueType(0);
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1));
    Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2,
                       DAG.getConstant(PairTy.getSizeInBits()/2,
                                       TLI.getShiftAmountTy(PairTy)));
    Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2));
    break;
  }
  case ISD::SELECT:
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    Tmp3 = Node->getOperand(2);
    if (Tmp1.getOpcode() == ISD::SETCC) {
      Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1),
                             Tmp2, Tmp3,
                             cast<CondCodeSDNode>(Tmp1.getOperand(2))->get());
    } else {
      Tmp1 = DAG.getSelectCC(dl, Tmp1,
                             DAG.getConstant(0, Tmp1.getValueType()),
                             Tmp2, Tmp3, ISD::SETNE);
    }
    Results.push_back(Tmp1);
    break;
  case ISD::BR_JT: {
    SDValue Chain = Node->getOperand(0);
    SDValue Table = Node->getOperand(1);
    SDValue Index = Node->getOperand(2);

    EVT PTy = TLI.getPointerTy();

    const TargetData &TD = *TLI.getTargetData();
    unsigned EntrySize =
      DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);

    Index = DAG.getNode(ISD::MUL, dl, PTy,
                        Index, DAG.getConstant(EntrySize, PTy));
    SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);

    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
    SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr,
                                MachinePointerInfo::getJumpTable(), MemVT,
                                false, false, 0);
    Addr = LD;
    if (TM.getRelocationModel() == Reloc::PIC_) {
      // For PIC, the sequence is:
      // BRIND(load(Jumptable + index) + RelocBase)
      // RelocBase can be JumpTable, GOT or some sort of global base.
      Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr,
                         TLI.getPICJumpTableRelocBase(Table, DAG));
    }
    Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::BRCOND:
    // Expand brcond's setcc into its constituent parts and create a BR_CC
    // Node.
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    if (Tmp2.getOpcode() == ISD::SETCC) {
      Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other,
                         Tmp1, Tmp2.getOperand(2),
                         Tmp2.getOperand(0), Tmp2.getOperand(1),
                         Node->getOperand(2));
    } else {
      // We test only the i1 bit.  Skip the AND if UNDEF.
      Tmp3 = (Tmp2.getOpcode() == ISD::UNDEF) ?
               Tmp2 :
               DAG.getNode(ISD::AND, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getConstant(1, Tmp2.getValueType()));
      Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1,
                         DAG.getCondCode(ISD::SETNE), Tmp3,
                         DAG.getConstant(0, Tmp3.getValueType()),
                         Node->getOperand(2));
    }
    Results.push_back(Tmp1);
    break;
  case ISD::SETCC: {
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    Tmp3 = Node->getOperand(2);
    LegalizeSetCCCondCode(Node->getValueType(0), Tmp1, Tmp2, Tmp3, dl);

    // If we expanded the SETCC into an AND/OR, return the new node
    if (Tmp2.getNode() == 0) {
      Results.push_back(Tmp1);
      break;
    }

    // Otherwise, SETCC for the given comparison type must be completely
    // illegal; expand it into a SELECT_CC.
    EVT VT = Node->getValueType(0);
    Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2,
                       DAG.getConstant(1, VT), DAG.getConstant(0, VT), Tmp3);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SELECT_CC: {
    Tmp1 = Node->getOperand(0);   // LHS
    Tmp2 = Node->getOperand(1);   // RHS
    Tmp3 = Node->getOperand(2);   // True
    Tmp4 = Node->getOperand(3);   // False
    SDValue CC = Node->getOperand(4);

    LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp1.getValueType()),
                          Tmp1, Tmp2, CC, dl);

    assert(!Tmp2.getNode() && "Can't legalize SELECT_CC with legal condition!");
    Tmp2 = DAG.getConstant(0, Tmp1.getValueType());
    CC = DAG.getCondCode(ISD::SETNE);
    Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, Tmp2,
                       Tmp3, Tmp4, CC);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::BR_CC: {
    Tmp1 = Node->getOperand(0);              // Chain
    Tmp2 = Node->getOperand(2);              // LHS
    Tmp3 = Node->getOperand(3);              // RHS
    Tmp4 = Node->getOperand(1);              // CC

    LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp2.getValueType()),
                          Tmp2, Tmp3, Tmp4, dl);
    assert(LastCALLSEQ.size() == 1 && "branch inside CALLSEQ_BEGIN/END?");
    setLastCALLSEQ(DAG.getEntryNode());

    assert(!Tmp3.getNode() && "Can't legalize BR_CC with legal condition!");
    Tmp3 = DAG.getConstant(0, Tmp2.getValueType());
    Tmp4 = DAG.getCondCode(ISD::SETNE);
    Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4, Tmp2,
                       Tmp3, Node->getOperand(4));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::GLOBAL_OFFSET_TABLE:
  case ISD::GlobalAddress:
  case ISD::GlobalTLSAddress:
  case ISD::ExternalSymbol:
  case ISD::ConstantPool:
  case ISD::JumpTable:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID:
    // FIXME: Custom lowering for these operations shouldn't return null!
    for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
      Results.push_back(SDValue(Node, i));
    break;
  }
}

void SelectionDAGLegalize::PromoteNode(SDNode *Node,
                                       SmallVectorImpl<SDValue> &Results) {
  EVT OVT = Node->getValueType(0);
  if (Node->getOpcode() == ISD::UINT_TO_FP ||
      Node->getOpcode() == ISD::SINT_TO_FP ||
      Node->getOpcode() == ISD::SETCC) {
    OVT = Node->getOperand(0).getValueType();
  }
  EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
  DebugLoc dl = Node->getDebugLoc();
  SDValue Tmp1, Tmp2, Tmp3;
  switch (Node->getOpcode()) {
  case ISD::CTTZ:
  case ISD::CTLZ:
  case ISD::CTPOP:
    // Zero extend the argument.
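    // E.g. promoting an i8 CTLZ to i32: count leading zeros in the i32 and
    // subtract the 24 extra high-order bits; for CTTZ, a result of 32 (no set
    // bits) is clamped back down to 8 by the SELECT below.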
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
    // Perform the larger operation.
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
    if (Node->getOpcode() == ISD::CTTZ) {
      // if Tmp1 == sizeinbits(NVT) then Tmp1 = sizeinbits(Old VT)
      Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT),
                          Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT),
                          ISD::SETEQ);
      Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2,
                         DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1);
    } else if (Node->getOpcode() == ISD::CTLZ) {
      // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
      Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1,
                         DAG.getConstant(NVT.getSizeInBits() -
                                         OVT.getSizeInBits(), NVT));
    }
    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1));
    break;
  case ISD::BSWAP: {
    unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
    Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1);
    Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1,
                       DAG.getConstant(DiffBits, TLI.getShiftAmountTy(NVT)));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:
    Tmp1 = PromoteLegalFP_TO_INT(Node->getOperand(0), Node->getValueType(0),
                                 Node->getOpcode() == ISD::FP_TO_SINT, dl);
    Results.push_back(Tmp1);
    break;
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
    Tmp1 = PromoteLegalINT_TO_FP(Node->getOperand(0), Node->getValueType(0),
                                 Node->getOpcode() == ISD::SINT_TO_FP, dl);
    Results.push_back(Tmp1);
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    unsigned ExtOp, TruncOp;
    if (OVT.isVector()) {
      ExtOp   = ISD::BITCAST;
      TruncOp = ISD::BITCAST;
    } else {
      assert(OVT.isInteger() && "Cannot promote logic operation");
      ExtOp   = ISD::ANY_EXTEND;
      TruncOp = ISD::TRUNCATE;
    }
    // Promote each of the values to the new type.
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    // Perform the larger operation, then convert back
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
    Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1));
    break;
  }
  case ISD::SELECT: {
    unsigned ExtOp, TruncOp;
    if (Node->getValueType(0).isVector()) {
      ExtOp   = ISD::BITCAST;
      TruncOp = ISD::BITCAST;
    } else if (Node->getValueType(0).isInteger()) {
      ExtOp   = ISD::ANY_EXTEND;
      TruncOp = ISD::TRUNCATE;
    } else {
      ExtOp   = ISD::FP_EXTEND;
      TruncOp = ISD::FP_ROUND;
    }
    Tmp1 = Node->getOperand(0);
    // Promote each of the values to the new type.
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
    // Perform the larger operation, then round down.
    Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp1, Tmp2, Tmp3);
    if (TruncOp != ISD::FP_ROUND)
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1);
    else
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1,
                         DAG.getIntPtrConstant(0));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    SmallVector<int, 8> Mask;
    cast<ShuffleVectorSDNode>(Node)->getMask(Mask);

    // Cast the two input vectors.
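    // The shuffle is redone on the promoted type with its narrower element
    // count, e.g. a v4i32 shuffle <0,1,0,1> becomes a v8i16 shuffle
    // <0,1,2,3,0,1,2,3>, and the result is bitcast back to OVT.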
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1));

    // Convert the shuffle mask to the right # elements.
    Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SETCC: {
    unsigned ExtOp = ISD::FP_EXTEND;
    if (NVT.isInteger()) {
      ISD::CondCode CCCode =
        cast<CondCodeSDNode>(Node->getOperand(2))->get();
      ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    }
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
                                  Tmp1, Tmp2, Node->getOperand(2)));
    break;
  }
  }
}

// SelectionDAG::Legalize - This is the entry point for the file.
//
void SelectionDAG::Legalize() {
  /// run - This is the main entry point to this class.
  ///
  SelectionDAGLegalize(*this).LegalizeDAG();
}