LegalizeDAG.cpp revision b3dbd4a0b61d9c26df358384bdb66a41326f1a9d
//===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::Legalize method.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;

//===----------------------------------------------------------------------===//
/// SelectionDAGLegalize - This takes an arbitrary SelectionDAG as input and
/// hacks on it until the target machine can handle it.  This involves
/// eliminating value sizes the machine cannot handle (promoting small sizes to
/// large sizes or splitting up large values into small values) as well as
/// eliminating operations the machine cannot handle.
///
/// This code also does a small amount of optimization and recognition of
/// idioms as part of its processing.  For example, if a target does not
/// support a 'setcc' instruction efficiently, but does support the 'brcc'
/// instruction, this will attempt to merge setcc and brc instructions into
/// brcc's.
///
namespace {
class SelectionDAGLegalize {
  const TargetMachine &TM;
  const TargetLowering &TLI;
  SelectionDAG &DAG;
  CodeGenOpt::Level OptLevel;

  // Libcall insertion helpers.

  /// LastCALLSEQ - This keeps track of the CALLSEQ_END node that has been
  /// legalized.  We use this to ensure that calls are properly serialized
  /// against each other, including inserted libcalls.
  SmallVector<SDValue, 8> LastCALLSEQ;

  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand      // Try to expand this to other ops, otherwise use a libcall.
  };

  /// ValueTypeActions - This is a bitvector that contains two bits for each
  /// value type, where the two bits correspond to the LegalizeAction enum.
  /// This can be queried with "getTypeAction(VT)".
  TargetLowering::ValueTypeActionImpl ValueTypeActions;

  /// LegalizedNodes - For nodes that are of legal width, and that have more
  /// than one use, this map indicates what regularized operand to use.  This
  /// allows us to avoid legalizing the same thing more than once.
  DenseMap<SDValue, SDValue> LegalizedNodes;

  void AddLegalizedOperand(SDValue From, SDValue To) {
    LegalizedNodes.insert(std::make_pair(From, To));
    // If someone requests legalization of the new node, return itself.
    if (From != To)
      LegalizedNodes.insert(std::make_pair(To, To));

    // Transfer SDDbgValues.
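    // Debug values attached to the old node are moved to its replacement so
    // that source-level debug info survives legalization.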
    DAG.TransferDbgValues(From, To);
  }

public:
  SelectionDAGLegalize(SelectionDAG &DAG, CodeGenOpt::Level ol);

  /// getTypeAction - Return how we should legalize values of this type, either
  /// it is already legal or we need to expand it into multiple registers of
  /// smaller integer type, or we need to promote it to a larger type.
  LegalizeAction getTypeAction(EVT VT) const {
    return (LegalizeAction)ValueTypeActions.getTypeAction(VT);
  }

  /// isTypeLegal - Return true if this type is legal on this target.
  ///
  bool isTypeLegal(EVT VT) const {
    return getTypeAction(VT) == Legal;
  }

  void LegalizeDAG();

private:
  /// LegalizeOp - We know that the specified value has a legal type.
  /// Recursively ensure that the operands have legal types, then return the
  /// result.
  SDValue LegalizeOp(SDValue O);

  SDValue OptimizeFloatStore(StoreSDNode *ST);

  /// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
  /// insertion index for the INSERT_VECTOR_ELT instruction.  In this case, it
  /// is necessary to spill the vector being inserted into to memory, perform
  /// the insert there, and then read the result back.
  SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val,
                                         SDValue Idx, DebugLoc dl);
  SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
                                  SDValue Idx, DebugLoc dl);

  /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
  /// performs the same shuffle in terms of order or result bytes, but on a
  /// type whose vector element type is narrower than the original shuffle
  /// type.  e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
  SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
                                     SDValue N1, SDValue N2,
                                     SmallVectorImpl<int> &Mask) const;

  bool LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
                                    SmallPtrSet<SDNode*, 32> &NodesLeadingTo);

  void LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
                             DebugLoc dl);

  SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
  SDValue ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, const SDValue *Ops,
                        unsigned NumOps, bool isSigned, DebugLoc dl);

  std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
                                                 SDNode *Node, bool isSigned);
  SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
                          RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
                          RTLIB::Libcall Call_PPCF128);
  SDValue ExpandIntLibCall(SDNode *Node, bool isSigned,
                           RTLIB::Libcall Call_I8,
                           RTLIB::Libcall Call_I16,
                           RTLIB::Libcall Call_I32,
                           RTLIB::Libcall Call_I64,
                           RTLIB::Libcall Call_I128);
  void ExpandDivRemLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, DebugLoc dl);
  SDValue ExpandBUILD_VECTOR(SDNode *Node);
  SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
  void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
                                SmallVectorImpl<SDValue> &Results);
  SDValue ExpandFCOPYSIGN(SDNode *Node);
  SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT,
                               DebugLoc dl);
  SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);
  SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);

  SDValue ExpandBSWAP(SDValue Op, DebugLoc dl);
  SDValue ExpandBitCount(unsigned Opc, SDValue Op, DebugLoc dl);

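  // Expansions that go through a temporary stack slot.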
  SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
  SDValue ExpandInsertToVectorThroughStack(SDValue Op);
  SDValue ExpandVectorBuildThroughStack(SDNode* Node);

  std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);

  void ExpandNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void PromoteNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  SDValue getLastCALLSEQ() { return LastCALLSEQ.back(); }
  void setLastCALLSEQ(const SDValue s) { LastCALLSEQ.back() = s; }
  void pushLastCALLSEQ(SDValue s) {
    LastCALLSEQ.push_back(s);
  }
  void popLastCALLSEQ() {
    LastCALLSEQ.pop_back();
  }
};
}

/// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
/// performs the same shuffle in terms of order or result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
SDValue
SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
                                                 SDValue N1, SDValue N2,
                                             SmallVectorImpl<int> &Mask) const {
  unsigned NumMaskElts = VT.getVectorNumElements();
  unsigned NumDestElts = NVT.getVectorNumElements();
  unsigned NumEltsGrowth = NumDestElts / NumMaskElts;

  assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!");

  if (NumEltsGrowth == 1)
    return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]);

  SmallVector<int, 8> NewMask;
  for (unsigned i = 0; i != NumMaskElts; ++i) {
    int Idx = Mask[i];
    for (unsigned j = 0; j != NumEltsGrowth; ++j) {
      if (Idx < 0)
        NewMask.push_back(-1);
      else
        NewMask.push_back(Idx * NumEltsGrowth + j);
    }
  }
  assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?");
  assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?");
  return DAG.getVectorShuffle(NVT, dl, N1, N2, &NewMask[0]);
}

SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag,
                                           CodeGenOpt::Level ol)
  : TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()),
    DAG(dag), OptLevel(ol),
    ValueTypeActions(TLI.getValueTypeActions()) {
  assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE &&
         "Too many value types for ValueTypeActions to hold!");
}

void SelectionDAGLegalize::LegalizeDAG() {
  pushLastCALLSEQ(DAG.getEntryNode());

  // The legalize process is inherently a bottom-up recursive process (users
  // legalize their uses before themselves).  Given infinite stack space, we
  // could just start legalizing on the root and traverse the whole graph.  In
  // practice however, this causes us to run out of stack space on large basic
  // blocks.  To avoid this problem, compute an ordering of the nodes where
  // each node is only legalized after all of its operands are legalized.
  DAG.AssignTopologicalOrder();
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = prior(DAG.allnodes_end()); I != llvm::next(E); ++I)
    LegalizeOp(SDValue(I, 0));

  // Finally, it's possible the root changed.  Get the new root.
  SDValue OldRoot = DAG.getRoot();
  assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?");
  DAG.setRoot(LegalizedNodes[OldRoot]);

  LegalizedNodes.clear();

  // Remove dead nodes now.
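  // Nodes that were replaced during legalization may have become unreachable
  // from the root; delete them so later phases never see them.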
  DAG.RemoveDeadNodes();
}


/// FindCallEndFromCallStart - Given a chained node that is part of a call
/// sequence, find the CALLSEQ_END node that terminates the call sequence.
static SDNode *FindCallEndFromCallStart(SDNode *Node, int depth = 0) {
  int next_depth = depth;
  if (Node->getOpcode() == ISD::CALLSEQ_START)
    next_depth = depth + 1;
  if (Node->getOpcode() == ISD::CALLSEQ_END) {
    assert(depth > 0 && "negative depth!");
    if (depth == 1)
      return Node;
    else
      next_depth = depth - 1;
  }
  if (Node->use_empty())
    return 0;   // No CallSeqEnd

  // The chain is usually at the end.
  SDValue TheChain(Node, Node->getNumValues()-1);
  if (TheChain.getValueType() != MVT::Other) {
    // Sometimes it's at the beginning.
    TheChain = SDValue(Node, 0);
    if (TheChain.getValueType() != MVT::Other) {
      // Otherwise, hunt for it.
      for (unsigned i = 1, e = Node->getNumValues(); i != e; ++i)
        if (Node->getValueType(i) == MVT::Other) {
          TheChain = SDValue(Node, i);
          break;
        }

      // Otherwise, we walked into a node without a chain.
      if (TheChain.getValueType() != MVT::Other)
        return 0;
    }
  }

  for (SDNode::use_iterator UI = Node->use_begin(),
       E = Node->use_end(); UI != E; ++UI) {

    // Make sure to only follow users of our token chain.
    SDNode *User = *UI;
    for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i)
      if (User->getOperand(i) == TheChain)
        if (SDNode *Result = FindCallEndFromCallStart(User, next_depth))
          return Result;
  }
  return 0;
}

/// FindCallStartFromCallEnd - Given a chained node that is part of a call
/// sequence, find the CALLSEQ_START node that initiates the call sequence.
static SDNode *FindCallStartFromCallEnd(SDNode *Node) {
  int nested = 0;
  assert(Node && "Didn't find callseq_start for a call??");
  while (Node->getOpcode() != ISD::CALLSEQ_START || nested) {
    Node = Node->getOperand(0).getNode();
    assert(Node->getOperand(0).getValueType() == MVT::Other &&
           "Node doesn't have a token chain argument!");
    switch (Node->getOpcode()) {
    default:
      break;
    case ISD::CALLSEQ_START:
      if (!nested)
        return Node;
      Node = Node->getOperand(0).getNode();
      nested--;
      break;
    case ISD::CALLSEQ_END:
      nested++;
      break;
    }
  }
  return (Node->getOpcode() == ISD::CALLSEQ_START) ? Node : 0;
}

/// LegalizeAllNodesNotLeadingTo - Recursively walk the uses of N, looking to
/// see if any uses can reach Dest.  If no dest operands can get to dest,
/// legalize them, legalize ourself, and return false, otherwise, return true.
///
/// Keep track of the nodes we find that actually do lead to Dest in
/// NodesLeadingTo.  This avoids retraversing them an exponential number of
/// times.
///
bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
                                     SmallPtrSet<SDNode*, 32> &NodesLeadingTo) {
  if (N == Dest) return true;  // N certainly leads to Dest :)

  // If we've already processed this node and it does lead to Dest, there is no
  // need to reprocess it.
  if (NodesLeadingTo.count(N)) return true;

  // If the first result of this node has been already legalized, then it
  // cannot reach N.
  if (LegalizedNodes.count(SDValue(N, 0))) return false;

  // Okay, this node has not already been legalized.  Check and legalize all
  // operands.  If none lead to Dest, then we can legalize this node.
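  // If any operand can reach Dest, this node leads to Dest as well and must
  // not be legalized yet.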
  bool OperandsLeadToDest = false;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    OperandsLeadToDest |=   // If an operand leads to Dest, so do we.
      LegalizeAllNodesNotLeadingTo(N->getOperand(i).getNode(), Dest,
                                   NodesLeadingTo);

  if (OperandsLeadToDest) {
    NodesLeadingTo.insert(N);
    return true;
  }

  // Okay, this node looks safe, legalize it and return false.
  LegalizeOp(SDValue(N, 0));
  return false;
}

/// ExpandConstantFP - Expands the ConstantFP node to an integer constant or
/// a load from the constant pool.
static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
                                SelectionDAG &DAG, const TargetLowering &TLI) {
  bool Extend = false;
  DebugLoc dl = CFP->getDebugLoc();

  // If a FP immediate is precise when represented as a float and if the
  // target can do an extending load from float to double, we put it into
  // the constant pool as a float, even if it is statically typed as a
  // double.  This shrinks FP constants and canonicalizes them for targets
  // where an FP extending load is the same cost as a normal load (such as on
  // the x87 fp stack or PPC FP unit).
  EVT VT = CFP->getValueType(0);
  ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue());
  if (!UseCP) {
    assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion");
    return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(),
                           (VT == MVT::f64) ? MVT::i64 : MVT::i32);
  }

  EVT OrigVT = VT;
  EVT SVT = VT;
  while (SVT != MVT::f32) {
    SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
    if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) &&
        // Only do this if the target has a native EXTLOAD instruction from
        // smaller type.
        TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
        TLI.ShouldShrinkFPConstant(OrigVT)) {
      const Type *SType = SVT.getTypeForEVT(*DAG.getContext());
      LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
      VT = SVT;
      Extend = true;
    }
  }

  SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy());
  unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
  if (Extend)
    return DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT,
                          DAG.getEntryNode(),
                          CPIdx, MachinePointerInfo::getConstantPool(),
                          VT, false, false, Alignment);
  return DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx,
                     MachinePointerInfo::getConstantPool(), false, false,
                     Alignment);
}

/// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores.
static
SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
                             const TargetLowering &TLI) {
  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  SDValue Val = ST->getValue();
  EVT VT = Val.getValueType();
  int Alignment = ST->getAlignment();
  DebugLoc dl = ST->getDebugLoc();
  if (ST->getMemoryVT().isFloatingPoint() ||
      ST->getMemoryVT().isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (TLI.isTypeLegal(intVT)) {
      // Expand to a bitconvert of the value to the integer type of the
      // same size, then a (misaligned) int store.
      // FIXME: Does not handle truncating floating point stores!
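      // For example, an unaligned f64 store becomes a BITCAST to i64 followed
      // by an unaligned i64 store.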
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
      return DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
                          ST->isVolatile(), ST->isNonTemporal(), Alignment);
    } else {
      // Do an (aligned) store to a stack slot, then copy from the stack slot
      // to the final destination using (unaligned) integer loads and stores.
      EVT StoredVT = ST->getMemoryVT();
      EVT RegVT =
        TLI.getRegisterType(*DAG.getContext(),
                            EVT::getIntegerVT(*DAG.getContext(),
                                              StoredVT.getSizeInBits()));
      unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
      unsigned RegBytes = RegVT.getSizeInBits() / 8;
      unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;

      // Make sure the stack slot is also aligned for the register type.
      SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);

      // Perform the original store, only redirected to the stack slot.
      SDValue Store = DAG.getTruncStore(Chain, dl,
                                        Val, StackPtr, MachinePointerInfo(),
                                        StoredVT, false, false, 0);
      SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
      SmallVector<SDValue, 8> Stores;
      unsigned Offset = 0;

      // Do all but one copy using the full register width.
      for (unsigned i = 1; i < NumRegs; i++) {
        // Load one integer register's worth from the stack slot.
        SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr,
                                   MachinePointerInfo(),
                                   false, false, 0);
        // Store it to the final location.  Remember the store.
        Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
                                     ST->getPointerInfo().getWithOffset(Offset),
                                      ST->isVolatile(), ST->isNonTemporal(),
                                      MinAlign(ST->getAlignment(), Offset)));
        // Increment the pointers.
        Offset += RegBytes;
        StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                               Increment);
        Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
      }

      // The last store may be partial.  Do a truncating store.  On big-endian
      // machines this requires an extending load from the stack slot to ensure
      // that the bits are in the right place.
      EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                    8 * (StoredBytes - Offset));

      // Load from the stack slot.
      SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
                                    MachinePointerInfo(),
                                    MemVT, false, false, 0);

      Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
                                         ST->getPointerInfo()
                                           .getWithOffset(Offset),
                                         MemVT, ST->isVolatile(),
                                         ST->isNonTemporal(),
                                        MinAlign(ST->getAlignment(), Offset)));
      // The order of the stores doesn't matter - say it with a TokenFactor.
      return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
                         Stores.size());
    }
  }
  assert(ST->getMemoryVT().isInteger() &&
         !ST->getMemoryVT().isVector() &&
         "Unaligned store of unknown type.");
  // Get the half-size VT
  EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext());
  int NumBits = NewStoredVT.getSizeInBits();
  int IncrementSize = NumBits / 8;

  // Divide the stored value in two parts.
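  // Lo keeps the original value; Hi is the value shifted right by half the
  // width, so each part fits in the half-sized NewStoredVT.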
  SDValue ShiftAmount = DAG.getConstant(NumBits,
                                      TLI.getShiftAmountTy(Val.getValueType()));
  SDValue Lo = Val;
  SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);

  // Store the two parts
  SDValue Store1, Store2;
  Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr,
                             ST->getPointerInfo(), NewStoredVT,
                             ST->isVolatile(), ST->isNonTemporal(), Alignment);
  Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                    DAG.getConstant(IncrementSize, TLI.getPointerTy()));
  Alignment = MinAlign(Alignment, IncrementSize);
  Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr,
                             ST->getPointerInfo().getWithOffset(IncrementSize),
                             NewStoredVT, ST->isVolatile(), ST->isNonTemporal(),
                             Alignment);

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
}

/// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads.
static
SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
                            const TargetLowering &TLI) {
  SDValue Chain = LD->getChain();
  SDValue Ptr = LD->getBasePtr();
  EVT VT = LD->getValueType(0);
  EVT LoadedVT = LD->getMemoryVT();
  DebugLoc dl = LD->getDebugLoc();
  if (VT.isFloatingPoint() || VT.isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
    if (TLI.isTypeLegal(intVT)) {
      // Expand to a (misaligned) integer load of the same size,
      // then bitconvert to floating point or vector.
      SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(),
                                    LD->isVolatile(),
                                    LD->isNonTemporal(), LD->getAlignment());
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
      if (VT.isFloatingPoint() && LoadedVT != VT)
        Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result);

      SDValue Ops[] = { Result, Chain };
      return DAG.getMergeValues(Ops, 2, dl);
    }

    // Copy the value to an (aligned) stack slot using (unaligned) integer
    // loads and stores, then do an (aligned) load from the stack slot.
    EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT);
    unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);

    SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
    SmallVector<SDValue, 8> Stores;
    SDValue StackPtr = StackBase;
    unsigned Offset = 0;

    // Do all but one copy using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the original location.
      SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr,
                                 LD->getPointerInfo().getWithOffset(Offset),
                                 LD->isVolatile(), LD->isNonTemporal(),
                                 MinAlign(LD->getAlignment(), Offset));
      // Follow the load with a store to the stack slot.  Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
                                    MachinePointerInfo(), false, false, 0));
      // Increment the pointers.
      Offset += RegBytes;
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                             Increment);
    }

    // The last copy may be partial.  Do an extending load.
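    // Only the remaining (LoadedBytes - Offset) bytes are read here, extended
    // into a full register.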
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (LoadedBytes - Offset));
    SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
                                  LD->getPointerInfo().getWithOffset(Offset),
                                  MemVT, LD->isVolatile(),
                                  LD->isNonTemporal(),
                                  MinAlign(LD->getAlignment(), Offset));
    // Follow the load with a store to the stack slot.  Remember the store.
    // On big-endian machines this requires a truncating store to ensure
    // that the bits end up in the right place.
    Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
                                       MachinePointerInfo(), MemVT,
                                       false, false, 0));

    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
                             Stores.size());

    // Finally, perform the original load only redirected to the stack slot.
    Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
                          MachinePointerInfo(), LoadedVT, false, false, 0);

    // Callers expect a MERGE_VALUES node.
    SDValue Ops[] = { Load, TF };
    return DAG.getMergeValues(Ops, 2, dl);
  }
  assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
         "Unaligned load of unsupported type.");

  // Compute the new VT that is half the size of the old one.  This is an
  // integer MVT.
  unsigned NumBits = LoadedVT.getSizeInBits();
  EVT NewLoadedVT;
  NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
  NumBits >>= 1;

  unsigned Alignment = LD->getAlignment();
  unsigned IncrementSize = NumBits / 8;
  ISD::LoadExtType HiExtType = LD->getExtensionType();

  // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
  if (HiExtType == ISD::NON_EXTLOAD)
    HiExtType = ISD::ZEXTLOAD;

  // Load the value in two parts
  SDValue Lo, Hi;
  if (TLI.isLittleEndian()) {
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, TLI.getPointerTy()));
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
  } else {
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, TLI.getPointerTy()));
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
  }

  // Aggregate the two parts.
  SDValue ShiftAmount = DAG.getConstant(NumBits,
                                      TLI.getShiftAmountTy(Hi.getValueType()));
  SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
  Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                           Hi.getValue(1));

  SDValue Ops[] = { Result, TF };
  return DAG.getMergeValues(Ops, 2, dl);
}

/// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
/// insertion index for the INSERT_VECTOR_ELT instruction.  In this case, it
/// is necessary to spill the vector being inserted into to memory, perform
/// the insert there, and then read the result back.
SDValue SelectionDAGLegalize::
PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
                               DebugLoc dl) {
  SDValue Tmp1 = Vec;
  SDValue Tmp2 = Val;
  SDValue Tmp3 = Idx;

  // If the target doesn't support this, we have to spill the input vector
  // to a temporary stack slot, update the element, then reload it.  This is
  // badness.  We could also load the value into a vector register (either
  // with a "move to register" or "extload into register" instruction), then
  // permute it into place, if the idx is a constant and if the idx is
  // supported by the target.
  EVT VT    = Tmp1.getValueType();
  EVT EltVT = VT.getVectorElementType();
  EVT IdxVT = Tmp3.getValueType();
  EVT PtrVT = TLI.getPointerTy();
  SDValue StackPtr = DAG.CreateStackTemporary(VT);

  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();

  // Store the vector.
  SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr,
                            MachinePointerInfo::getFixedStack(SPFI),
                            false, false, 0);

  // Truncate or zero extend offset to target pointer type.
  unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  Tmp3 = DAG.getNode(CastOpc, dl, PtrVT, Tmp3);
  // Add the offset to the index.
  unsigned EltSize = EltVT.getSizeInBits()/8;
  Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT));
  SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr);
  // Store the scalar value.
  Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, MachinePointerInfo(), EltVT,
                         false, false, 0);
  // Load the updated vector.
  return DAG.getLoad(VT, dl, Ch, StackPtr,
                     MachinePointerInfo::getFixedStack(SPFI), false, false, 0);
}


SDValue SelectionDAGLegalize::
ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, DebugLoc dl) {
  if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) {
    // SCALAR_TO_VECTOR requires that the type of the value being inserted
    // match the element type of the vector being created, except for
    // integers in which case the inserted value can be over width.
    EVT EltVT = Vec.getValueType().getVectorElementType();
    if (Val.getValueType() == EltVT ||
        (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) {
      SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                  Vec.getValueType(), Val);

      unsigned NumElts = Vec.getValueType().getVectorNumElements();
      // We generate a shuffle of InVec and ScVec, so the shuffle mask
      // should be 0,1,2,3,4,5... with the appropriate element replaced with
      // elt 0 of the RHS.
      SmallVector<int, 8> ShufOps;
      for (unsigned i = 0; i != NumElts; ++i)
        ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts);

      return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec,
                                  &ShufOps[0]);
    }
  }
  return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl);
}

SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
  // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
  // FIXME: We shouldn't do this for TargetConstantFP's.
  // FIXME: move this to the DAG Combiner!  Note that we can't regress due
  // to phase ordering between legalized code and the dag combiner.  This
  // probably means that we need to integrate dag combiner and legalizer
  // together.
  // We generally can't do this one for long doubles.
  SDValue Tmp1 = ST->getChain();
  SDValue Tmp2 = ST->getBasePtr();
  SDValue Tmp3;
  unsigned Alignment = ST->getAlignment();
  bool isVolatile = ST->isVolatile();
  bool isNonTemporal = ST->isNonTemporal();
  DebugLoc dl = ST->getDebugLoc();
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) {
    if (CFP->getValueType(0) == MVT::f32 &&
        getTypeAction(MVT::i32) == Legal) {
      Tmp3 = DAG.getConstant(CFP->getValueAPF().
                               bitcastToAPInt().zextOrTrunc(32),
                             MVT::i32);
      return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                          isVolatile, isNonTemporal, Alignment);
    }

    if (CFP->getValueType(0) == MVT::f64) {
      // If this target supports 64-bit registers, do a single 64-bit store.
      if (getTypeAction(MVT::i64) == Legal) {
        Tmp3 = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
                                 zextOrTrunc(64), MVT::i64);
        return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                            isVolatile, isNonTemporal, Alignment);
      }

      if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) {
        // Otherwise, if the target supports 32-bit registers, use 2 32-bit
        // stores.  If the target supports neither 32- nor 64-bits, this
        // xform is certainly not worth it.
        const APInt &IntVal = CFP->getValueAPF().bitcastToAPInt();
        SDValue Lo = DAG.getConstant(IntVal.trunc(32), MVT::i32);
        SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32);
        if (TLI.isBigEndian()) std::swap(Lo, Hi);

        Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getPointerInfo(), isVolatile,
                          isNonTemporal, Alignment);
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(4));
        Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2,
                          ST->getPointerInfo().getWithOffset(4),
                          isVolatile, isNonTemporal, MinAlign(Alignment, 4U));

        return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
      }
    }
  }
  return SDValue(0, 0);
}

/// LegalizeOp - We know that the specified value has a legal type, and
/// that its operands are legal.  Now ensure that the operation itself
/// is legal, recursively ensuring that the operands' operations remain
/// legal.
SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
  if (Op.getOpcode() == ISD::TargetConstant) // Allow illegal target nodes.
    return Op;

  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();

  for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
    assert(getTypeAction(Node->getValueType(i)) == Legal &&
           "Unexpected illegal type!");

  for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
    assert((isTypeLegal(Node->getOperand(i).getValueType()) ||
            Node->getOperand(i).getOpcode() == ISD::TargetConstant) &&
           "Unexpected illegal type!");

  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
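  // If this value was already legalized on an earlier visit, return the
  // cached result instead of legalizing it again.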
  DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
  if (I != LegalizedNodes.end()) return I->second;

  SDValue Tmp1, Tmp2, Tmp3, Tmp4;
  SDValue Result = Op;
  bool isCustom = false;

  // Figure out the correct action; the way to query this varies by opcode.
  TargetLowering::LegalizeAction Action = TargetLowering::Legal;
  bool SimpleFinishLegalizing = true;
  switch (Node->getOpcode()) {
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID:
  case ISD::VAARG:
  case ISD::STACKSAVE:
    Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
    break;
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::EXTRACT_VECTOR_ELT:
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(0).getValueType());
    break;
  case ISD::FP_ROUND_INREG:
  case ISD::SIGN_EXTEND_INREG: {
    EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT();
    Action = TLI.getOperationAction(Node->getOpcode(), InnerType);
    break;
  }
  case ISD::SELECT_CC:
  case ISD::SETCC:
  case ISD::BR_CC: {
    unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 :
                         Node->getOpcode() == ISD::SETCC ? 2 : 1;
    unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 : 0;
    EVT OpVT = Node->getOperand(CompareOperand).getValueType();
    ISD::CondCode CCCode =
        cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get();
    Action = TLI.getCondCodeAction(CCCode, OpVT);
    if (Action == TargetLowering::Legal) {
      if (Node->getOpcode() == ISD::SELECT_CC)
        Action = TLI.getOperationAction(Node->getOpcode(),
                                        Node->getValueType(0));
      else
        Action = TLI.getOperationAction(Node->getOpcode(), OpVT);
    }
    break;
  }
  case ISD::LOAD:
  case ISD::STORE:
    // FIXME: Model these properly.  LOAD and STORE are complicated, and
    // STORE expects the unlegalized operand in some cases.
    SimpleFinishLegalizing = false;
    break;
  case ISD::CALLSEQ_START:
  case ISD::CALLSEQ_END:
    // FIXME: This shouldn't be necessary.  These nodes have special properties
    // dealing with the recursive nature of legalization.  Removing this
    // special case should be done as part of making LegalizeDAG non-recursive.
    SimpleFinishLegalizing = false;
    break;
  case ISD::EXTRACT_ELEMENT:
  case ISD::FLT_ROUNDS_:
  case ISD::SADDO:
  case ISD::SSUBO:
  case ISD::UADDO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
  case ISD::FPOWI:
  case ISD::MERGE_VALUES:
  case ISD::EH_RETURN:
  case ISD::FRAME_TO_ARGS_OFFSET:
  case ISD::EH_SJLJ_SETJMP:
  case ISD::EH_SJLJ_LONGJMP:
  case ISD::EH_SJLJ_DISPATCHSETUP:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be expanded.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Expand;
    break;
  case ISD::TRAMPOLINE:
  case ISD::FRAMEADDR:
  case ISD::RETURNADDR:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be custom-lowered.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Custom;
    break;
  case ISD::BUILD_VECTOR:
    // A weird case: legalization for BUILD_VECTOR never legalizes the
    // operands!
    // FIXME: This really sucks... changing it isn't semantically incorrect,
    // but it massively pessimizes the code for floating-point BUILD_VECTORs
    // because ConstantFP operands get legalized into constant pool loads
    // before the BUILD_VECTOR code can see them.  It doesn't usually bite,
    // though, because BUILD_VECTORS usually get lowered into other nodes
    // which get legalized properly.
    SimpleFinishLegalizing = false;
    break;
  default:
    if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
      Action = TargetLowering::Legal;
    } else {
      Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    }
    break;
  }

  if (SimpleFinishLegalizing) {
    SmallVector<SDValue, 8> Ops, ResultVals;
    for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
      Ops.push_back(LegalizeOp(Node->getOperand(i)));
    switch (Node->getOpcode()) {
    default: break;
    case ISD::BR:
    case ISD::BRIND:
    case ISD::BR_JT:
    case ISD::BR_CC:
    case ISD::BRCOND:
      assert(LastCALLSEQ.size() == 1 && "branch inside CALLSEQ_BEGIN/END?");
      // Branches tweak the chain to include LastCALLSEQ
      Ops[0] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ops[0],
                           getLastCALLSEQ());
      Ops[0] = LegalizeOp(Ops[0]);
      setLastCALLSEQ(DAG.getEntryNode());
      break;
    case ISD::SHL:
    case ISD::SRL:
    case ISD::SRA:
    case ISD::ROTL:
    case ISD::ROTR:
      // Legalizing shifts/rotates requires adjusting the shift amount
      // to the appropriate width.
      if (!Ops[1].getValueType().isVector())
        Ops[1] = LegalizeOp(DAG.getShiftAmountOperand(Ops[0].getValueType(),
                                                      Ops[1]));
      break;
    case ISD::SRL_PARTS:
    case ISD::SRA_PARTS:
    case ISD::SHL_PARTS:
      // Legalizing shifts/rotates requires adjusting the shift amount
      // to the appropriate width.
      if (!Ops[2].getValueType().isVector())
        Ops[2] = LegalizeOp(DAG.getShiftAmountOperand(Ops[0].getValueType(),
                                                      Ops[2]));
      break;
    }

    Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), Ops.data(),
                                            Ops.size()), 0);
    switch (Action) {
    case TargetLowering::Legal:
      for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
        ResultVals.push_back(Result.getValue(i));
      break;
    case TargetLowering::Custom:
      // FIXME: The handling for custom lowering with multiple results is
      // a complete mess.
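      // Give the target a chance to lower the node itself; a null result
      // means it declined, and we fall through to the generic expansion.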
      Tmp1 = TLI.LowerOperation(Result, DAG);
      if (Tmp1.getNode()) {
        for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) {
          if (e == 1)
            ResultVals.push_back(Tmp1);
          else
            ResultVals.push_back(Tmp1.getValue(i));
        }
        break;
      }

      // FALL THROUGH
    case TargetLowering::Expand:
      ExpandNode(Result.getNode(), ResultVals);
      break;
    case TargetLowering::Promote:
      PromoteNode(Result.getNode(), ResultVals);
      break;
    }
    if (!ResultVals.empty()) {
      for (unsigned i = 0, e = ResultVals.size(); i != e; ++i) {
        if (ResultVals[i] != SDValue(Node, i))
          ResultVals[i] = LegalizeOp(ResultVals[i]);
        AddLegalizedOperand(SDValue(Node, i), ResultVals[i]);
      }
      return ResultVals[Op.getResNo()];
    }
  }

  switch (Node->getOpcode()) {
  default:
#ifndef NDEBUG
    dbgs() << "NODE: ";
    Node->dump( &DAG);
    dbgs() << "\n";
#endif
    assert(0 && "Do not know how to legalize this operator!");

  case ISD::BUILD_VECTOR:
    switch (TLI.getOperationAction(ISD::BUILD_VECTOR, Node->getValueType(0))) {
    default: assert(0 && "This action is not supported yet!");
    case TargetLowering::Custom:
      Tmp3 = TLI.LowerOperation(Result, DAG);
      if (Tmp3.getNode()) {
        Result = Tmp3;
        break;
      }
      // FALLTHROUGH
    case TargetLowering::Expand:
      Result = ExpandBUILD_VECTOR(Result.getNode());
      break;
    }
    break;
  case ISD::CALLSEQ_START: {
    SDNode *CallEnd = FindCallEndFromCallStart(Node);
    assert(CallEnd && "didn't find CALLSEQ_END!");

    // Recursively Legalize all of the inputs of the call end that do not lead
    // to this call start.  This ensures that any libcalls that need be
    // inserted are inserted *before* the CALLSEQ_START.
    {SmallPtrSet<SDNode*, 32> NodesLeadingTo;
    for (unsigned i = 0, e = CallEnd->getNumOperands(); i != e; ++i)
      LegalizeAllNodesNotLeadingTo(CallEnd->getOperand(i).getNode(), Node,
                                   NodesLeadingTo);
    }

    // Now that we have legalized all of the inputs (which may have inserted
    // libcalls), create the new CALLSEQ_START node.
    Tmp1 = LegalizeOp(Node->getOperand(0));  // Legalize the chain.

    // Merge in the last call to ensure that this call starts after the last
    // call ended.
    if (getLastCALLSEQ().getOpcode() != ISD::EntryToken) {
      Tmp1 = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                         Tmp1, getLastCALLSEQ());
      Tmp1 = LegalizeOp(Tmp1);
    }

    // Do not try to legalize the target-specific arguments (#1+).
    if (Tmp1 != Node->getOperand(0)) {
      SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
      Ops[0] = Tmp1;
      Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), &Ops[0],
                                              Ops.size()), Result.getResNo());
    }

    // Remember that the CALLSEQ_START is legalized.
    AddLegalizedOperand(Op.getValue(0), Result);
    if (Node->getNumValues() == 2)    // If this has a flag result, remember it.
      AddLegalizedOperand(Op.getValue(1), Result.getValue(1));

    // Now that the callseq_start and all of the non-call nodes above this call
    // sequence have been legalized, legalize the call itself.  During this
    // process, no libcalls can/will be inserted, guaranteeing that no calls
    // can overlap.
    // Note that we are selecting this call!
    setLastCALLSEQ(SDValue(CallEnd, 0));

    // Legalize the call, starting from the CALLSEQ_END.
    LegalizeOp(getLastCALLSEQ());
    return Result;
  }
  case ISD::CALLSEQ_END:
    {
      SDNode *myCALLSEQ_BEGIN = FindCallStartFromCallEnd(Node);

      // If the CALLSEQ_START node hasn't been legalized first, legalize it.
      // This will cause this node to be legalized as well as handling libcalls
      // right.
      if (getLastCALLSEQ().getNode() != Node) {
        LegalizeOp(SDValue(myCALLSEQ_BEGIN, 0));
        DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
        assert(I != LegalizedNodes.end() &&
               "Legalizing the call start should have legalized this node!");
        return I->second;
      }

      pushLastCALLSEQ(SDValue(myCALLSEQ_BEGIN, 0));
    }

    // Otherwise, the call start has been legalized and everything is going
    // according to plan.  Just legalize ourselves normally here.
    Tmp1 = LegalizeOp(Node->getOperand(0));  // Legalize the chain.
    // Do not try to legalize the target-specific arguments (#1+), except for
    // an optional flag input.
    if (Node->getOperand(Node->getNumOperands()-1).getValueType() != MVT::Glue){
      if (Tmp1 != Node->getOperand(0)) {
        SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
        Ops[0] = Tmp1;
        Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
                                                &Ops[0], Ops.size()),
                         Result.getResNo());
      }
    } else {
      Tmp2 = LegalizeOp(Node->getOperand(Node->getNumOperands()-1));
      if (Tmp1 != Node->getOperand(0) ||
          Tmp2 != Node->getOperand(Node->getNumOperands()-1)) {
        SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
        Ops[0] = Tmp1;
        Ops.back() = Tmp2;
        Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
                                                &Ops[0], Ops.size()),
                         Result.getResNo());
      }
    }
    // This finishes up call legalization.
    popLastCALLSEQ();

    // If the CALLSEQ_END node has a flag, remember that we legalized it.
    AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0));
    if (Node->getNumValues() == 2)
      AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1));
    return Result.getValue(Op.getResNo());
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Node);
    Tmp1 = LegalizeOp(LD->getChain());   // Legalize the chain.
    Tmp2 = LegalizeOp(LD->getBasePtr()); // Legalize the base pointer.

    ISD::LoadExtType ExtType = LD->getExtensionType();
    if (ExtType == ISD::NON_EXTLOAD) {
      EVT VT = Node->getValueType(0);
      Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
                                              Tmp1, Tmp2, LD->getOffset()),
                       Result.getResNo());
      Tmp3 = Result.getValue(0);
      Tmp4 = Result.getValue(1);

      switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
      default: assert(0 && "This action is not supported yet!");
      case TargetLowering::Legal:
        // If this is an unaligned load and the target doesn't support it,
        // expand it.
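        // "Unaligned" here means the load's alignment is below the ABI
        // alignment of its memory type, as queried from the target below.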
        if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
          const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
          unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
          if (LD->getAlignment() < ABIAlignment){
            Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
                                         DAG, TLI);
            Tmp3 = Result.getOperand(0);
            Tmp4 = Result.getOperand(1);
            Tmp3 = LegalizeOp(Tmp3);
            Tmp4 = LegalizeOp(Tmp4);
          }
        }
        break;
      case TargetLowering::Custom:
        Tmp1 = TLI.LowerOperation(Tmp3, DAG);
        if (Tmp1.getNode()) {
          Tmp3 = LegalizeOp(Tmp1);
          Tmp4 = LegalizeOp(Tmp1.getValue(1));
        }
        break;
      case TargetLowering::Promote: {
        // Only promote a load of vector type to another.
        assert(VT.isVector() && "Cannot promote this load!");
        // Change base type to a different vector type.
        EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);

        Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getPointerInfo(),
                           LD->isVolatile(), LD->isNonTemporal(),
                           LD->getAlignment());
        Tmp3 = LegalizeOp(DAG.getNode(ISD::BITCAST, dl, VT, Tmp1));
        Tmp4 = LegalizeOp(Tmp1.getValue(1));
        break;
      }
      }
      // Since loads produce two values, make sure to remember that we
      // legalized both of them.
      AddLegalizedOperand(SDValue(Node, 0), Tmp3);
      AddLegalizedOperand(SDValue(Node, 1), Tmp4);
      return Op.getResNo() ? Tmp4 : Tmp3;
    }

    EVT SrcVT = LD->getMemoryVT();
    unsigned SrcWidth = SrcVT.getSizeInBits();
    unsigned Alignment = LD->getAlignment();
    bool isVolatile = LD->isVolatile();
    bool isNonTemporal = LD->isNonTemporal();

    if (SrcWidth != SrcVT.getStoreSizeInBits() &&
        // Some targets pretend to have an i1 loading operation, and actually
        // load an i8.  This trick is correct for ZEXTLOAD because the top 7
        // bits are guaranteed to be zero; it helps the optimizers understand
        // that these bits are zero.  It is also useful for EXTLOAD, since it
        // tells the optimizers that those bits are undefined.  It would be
        // nice to have an effective generic way of getting these benefits...
        // Until such a way is found, don't insist on promoting i1 here.
        (SrcVT != MVT::i1 ||
         TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
      // Promote to a byte-sized load if not loading an integral number of
      // bytes.  For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
      unsigned NewWidth = SrcVT.getStoreSizeInBits();
      EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
      SDValue Ch;

      // The extra bits are guaranteed to be zero, since we stored them that
      // way.  A zext load from NVT thus automatically gives zext from SrcVT.

      ISD::LoadExtType NewExtType =
        ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;

      Result = DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
                              Tmp1, Tmp2, LD->getPointerInfo(),
                              NVT, isVolatile, isNonTemporal, Alignment);

      Ch = Result.getValue(1); // The chain.

      if (ExtType == ISD::SEXTLOAD)
        // Having the top bits zero doesn't help when sign extending.
        Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
                             Result.getValueType(),
                             Result, DAG.getValueType(SrcVT));
      else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
        // All the top bits are guaranteed to be zero - inform the optimizers.
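        // AssertZext generates no code; it just records that the bits above
        // SrcVT are known to be zero.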
        Result = DAG.getNode(ISD::AssertZext, dl,
                             Result.getValueType(), Result,
                             DAG.getValueType(SrcVT));

      Tmp1 = LegalizeOp(Result);
      Tmp2 = LegalizeOp(Ch);
    } else if (SrcWidth & (SrcWidth - 1)) {
      // If not loading a power-of-2 number of bits, expand as two loads.
      assert(!SrcVT.isVector() && "Unsupported extload!");
      unsigned RoundWidth = 1 << Log2_32(SrcWidth);
      assert(RoundWidth < SrcWidth);
      unsigned ExtraWidth = SrcWidth - RoundWidth;
      assert(ExtraWidth < RoundWidth);
      assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
             "Load size not an integral number of bytes!");
      EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
      EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
      SDValue Lo, Hi, Ch;
      unsigned IncrementSize;

      if (TLI.isLittleEndian()) {
        // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
        // Load the bottom RoundWidth bits.
        Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0),
                            Tmp1, Tmp2,
                            LD->getPointerInfo(), RoundVT, isVolatile,
                            isNonTemporal, Alignment);

        // Load the remaining ExtraWidth bits.
        IncrementSize = RoundWidth / 8;
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(IncrementSize));
        Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getPointerInfo().getWithOffset(IncrementSize),
                            ExtraVT, isVolatile, isNonTemporal,
                            MinAlign(Alignment, IncrementSize));

        // Build a factor node to remember that this load is independent of
        // the other one.
        Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                         Hi.getValue(1));

        // Move the top bits to the right place.
        Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
                         DAG.getConstant(RoundWidth,
                                      TLI.getShiftAmountTy(Hi.getValueType())));

        // Join the hi and lo parts.
        Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
      } else {
        // Big endian - avoid unaligned loads.
        // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
        // Load the top RoundWidth bits.
        Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getPointerInfo(), RoundVT, isVolatile,
                            isNonTemporal, Alignment);

        // Load the remaining ExtraWidth bits.
        IncrementSize = RoundWidth / 8;
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(IncrementSize));
        Lo = DAG.getExtLoad(ISD::ZEXTLOAD,
                            dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getPointerInfo().getWithOffset(IncrementSize),
                            ExtraVT, isVolatile, isNonTemporal,
                            MinAlign(Alignment, IncrementSize));

        // Build a factor node to remember that this load is independent of
        // the other one.
        Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                         Hi.getValue(1));

        // Move the top bits to the right place.
        Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
                         DAG.getConstant(ExtraWidth,
                                      TLI.getShiftAmountTy(Hi.getValueType())));

        // Join the hi and lo parts.
        Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
      }

      Tmp1 = LegalizeOp(Result);
      Tmp2 = LegalizeOp(Ch);
    } else {
      switch (TLI.getLoadExtAction(ExtType, SrcVT)) {
      default: assert(0 && "This action is not supported yet!");
      case TargetLowering::Custom:
        isCustom = true;
        // FALLTHROUGH
      case TargetLowering::Legal:
        Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
                                                Tmp1, Tmp2, LD->getOffset()),
                         Result.getResNo());
        Tmp1 = Result.getValue(0);
        Tmp2 = Result.getValue(1);

        if (isCustom) {
          Tmp3 = TLI.LowerOperation(Result, DAG);
          if (Tmp3.getNode()) {
            Tmp1 = LegalizeOp(Tmp3);
            Tmp2 = LegalizeOp(Tmp3.getValue(1));
          }
        } else {
          // If this is an unaligned load and the target doesn't support it,
          // expand it.
          if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
            const Type *Ty =
              LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
            unsigned ABIAlignment =
              TLI.getTargetData()->getABITypeAlignment(Ty);
            if (LD->getAlignment() < ABIAlignment){
              Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
                                           DAG, TLI);
              Tmp1 = Result.getOperand(0);
              Tmp2 = Result.getOperand(1);
              Tmp1 = LegalizeOp(Tmp1);
              Tmp2 = LegalizeOp(Tmp2);
            }
          }
        }
        break;
      case TargetLowering::Expand:
        if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && isTypeLegal(SrcVT)) {
          SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2,
                                     LD->getPointerInfo(),
                                     LD->isVolatile(), LD->isNonTemporal(),
                                     LD->getAlignment());
          unsigned ExtendOp;
          switch (ExtType) {
          case ISD::EXTLOAD:
            ExtendOp = (SrcVT.isFloatingPoint() ?
                        ISD::FP_EXTEND : ISD::ANY_EXTEND);
            break;
          case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break;
          case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break;
          default: llvm_unreachable("Unexpected extend load type!");
          }
          Result = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
          Tmp1 = LegalizeOp(Result);  // Relegalize new nodes.
          Tmp2 = LegalizeOp(Load.getValue(1));
          break;
        }
        // FIXME: This does not work for vectors on most targets.  Sign- and
        // zero-extend operations are currently folded into extending loads,
        // whether they are legal or not, and then we end up here without any
        // support for legalizing them.
        assert(ExtType != ISD::EXTLOAD &&
               "EXTLOAD should always be supported!");
        // Turn the unsupported load into an EXTLOAD followed by an explicit
        // zero/sign extend inreg.
        Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0),
                                Tmp1, Tmp2, LD->getPointerInfo(), SrcVT,
                                LD->isVolatile(), LD->isNonTemporal(),
                                LD->getAlignment());
        SDValue ValRes;
        if (ExtType == ISD::SEXTLOAD)
          ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
                               Result.getValueType(),
                               Result, DAG.getValueType(SrcVT));
        else
          ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType());
        Tmp1 = LegalizeOp(ValRes);  // Relegalize new nodes.
        Tmp2 = LegalizeOp(Result.getValue(1));  // Relegalize new nodes.
        break;
      }
    }

    // Since loads produce two values, make sure to remember that we legalized
    // both of them.
    AddLegalizedOperand(SDValue(Node, 0), Tmp1);
    AddLegalizedOperand(SDValue(Node, 1), Tmp2);
    return Op.getResNo() ? Tmp2 : Tmp1;
  }
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(Node);
    Tmp1 = LegalizeOp(ST->getChain());    // Legalize the chain.
    Tmp2 = LegalizeOp(ST->getBasePtr());  // Legalize the pointer.
    unsigned Alignment = ST->getAlignment();
    bool isVolatile = ST->isVolatile();
    bool isNonTemporal = ST->isNonTemporal();

    if (!ST->isTruncatingStore()) {
      if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) {
        Result = SDValue(OptStore, 0);
        break;
      }

      {
        Tmp3 = LegalizeOp(ST->getValue());
        Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
                                                Tmp1, Tmp3, Tmp2,
                                                ST->getOffset()),
                         Result.getResNo());

        EVT VT = Tmp3.getValueType();
        switch (TLI.getOperationAction(ISD::STORE, VT)) {
        default: assert(0 && "This action is not supported yet!");
        case TargetLowering::Legal:
          // If this is an unaligned store and the target doesn't support it,
          // expand it.
          if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
            const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
            unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
            if (ST->getAlignment() < ABIAlignment)
              Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
                                            DAG, TLI);
          }
          break;
        case TargetLowering::Custom:
          Tmp1 = TLI.LowerOperation(Result, DAG);
          if (Tmp1.getNode()) Result = Tmp1;
          break;
        case TargetLowering::Promote:
          assert(VT.isVector() && "Unknown legal promote case!");
          Tmp3 = DAG.getNode(ISD::BITCAST, dl,
                             TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3);
          Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
                                ST->getPointerInfo(), isVolatile,
                                isNonTemporal, Alignment);
          break;
        }
        break;
      }
    } else {
      Tmp3 = LegalizeOp(ST->getValue());

      EVT StVT = ST->getMemoryVT();
      unsigned StWidth = StVT.getSizeInBits();

      if (StWidth != StVT.getStoreSizeInBits()) {
        // Promote to a byte-sized store with upper bits zero if not
        // storing an integral number of bytes.  For example, promote
        // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
        EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
                                    StVT.getStoreSizeInBits());
        Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT);
        Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                                   NVT, isVolatile, isNonTemporal, Alignment);
      } else if (StWidth & (StWidth - 1)) {
        // If not storing a power-of-2 number of bits, expand as two stores.
        assert(!StVT.isVector() && "Unsupported truncstore!");
        unsigned RoundWidth = 1 << Log2_32(StWidth);
        assert(RoundWidth < StWidth);
        unsigned ExtraWidth = StWidth - RoundWidth;
        assert(ExtraWidth < RoundWidth);
        assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
               "Store size not an integral number of bytes!");
        EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
        EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
        SDValue Lo, Hi;
        unsigned IncrementSize;

        if (TLI.isLittleEndian()) {
          // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
          // Store the bottom RoundWidth bits.
          Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                                 RoundVT,
                                 isVolatile, isNonTemporal, Alignment);

          // Store the remaining ExtraWidth bits.
1498 IncrementSize = RoundWidth / 8; 1499 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1500 DAG.getIntPtrConstant(IncrementSize)); 1501 Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3, 1502 DAG.getConstant(RoundWidth, 1503 TLI.getShiftAmountTy(Tmp3.getValueType()))); 1504 Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, 1505 ST->getPointerInfo().getWithOffset(IncrementSize), 1506 ExtraVT, isVolatile, isNonTemporal, 1507 MinAlign(Alignment, IncrementSize)); 1508 } else { 1509 // Big endian - avoid unaligned stores. 1510 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X 1511 // Store the top RoundWidth bits. 1512 Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3, 1513 DAG.getConstant(ExtraWidth, 1514 TLI.getShiftAmountTy(Tmp3.getValueType()))); 1515 Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getPointerInfo(), 1516 RoundVT, isVolatile, isNonTemporal, Alignment); 1517 1518 // Store the remaining ExtraWidth bits. 1519 IncrementSize = RoundWidth / 8; 1520 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1521 DAG.getIntPtrConstant(IncrementSize)); 1522 Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, 1523 ST->getPointerInfo().getWithOffset(IncrementSize), 1524 ExtraVT, isVolatile, isNonTemporal, 1525 MinAlign(Alignment, IncrementSize)); 1526 } 1527 1528 // The order of the stores doesn't matter. 1529 Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi); 1530 } else { 1531 if (Tmp1 != ST->getChain() || Tmp3 != ST->getValue() || 1532 Tmp2 != ST->getBasePtr()) 1533 Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), 1534 Tmp1, Tmp3, Tmp2, 1535 ST->getOffset()), 1536 Result.getResNo()); 1537 1538 switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) { 1539 default: assert(0 && "This action is not supported yet!"); 1540 case TargetLowering::Legal: 1541 // If this is an unaligned store and the target doesn't support it, 1542 // expand it. 1543 if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { 1544 const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1545 unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty); 1546 if (ST->getAlignment() < ABIAlignment) 1547 Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()), 1548 DAG, TLI); 1549 } 1550 break; 1551 case TargetLowering::Custom: 1552 Result = TLI.LowerOperation(Result, DAG); 1553 break; 1554 case Expand: 1555 // TRUNCSTORE:i16 i32 -> STORE i16 1556 assert(isTypeLegal(StVT) && "Do not know how to expand this store!"); 1557 Tmp3 = DAG.getNode(ISD::TRUNCATE, dl, StVT, Tmp3); 1558 Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), 1559 isVolatile, isNonTemporal, Alignment); 1560 break; 1561 } 1562 } 1563 } 1564 break; 1565 } 1566 } 1567 assert(Result.getValueType() == Op.getValueType() && 1568 "Bad legalization!"); 1569 1570 // Make sure that the generated code is itself legal. 1571 if (Result != Op) 1572 Result = LegalizeOp(Result); 1573 1574 // Note that LegalizeOp may be reentered even from single-use nodes, which 1575 // means that we always must cache transformed nodes. 1576 AddLegalizedOperand(Op, Result); 1577 return Result; 1578} 1579 1580SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) { 1581 SDValue Vec = Op.getOperand(0); 1582 SDValue Idx = Op.getOperand(1); 1583 DebugLoc dl = Op.getDebugLoc(); 1584 // Store the value to a temporary stack slot, then LOAD the returned part. 
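// The address of the requested element is StackPtr + Idx * EltSize (in
// bytes); e.g. extracting element 3 of a v4i32 reads back 4 bytes at offset
// 12 of the spilled vector. Because Idx is scaled and added as DAG nodes,
// this also handles a non-constant index.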
1585 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); 1586 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, 1587 MachinePointerInfo(), false, false, 0); 1588 1589 // Add the offset to the index. 1590 unsigned EltSize = 1591 Vec.getValueType().getVectorElementType().getSizeInBits()/8; 1592 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, 1593 DAG.getConstant(EltSize, Idx.getValueType())); 1594 1595 if (Idx.getValueType().bitsGT(TLI.getPointerTy())) 1596 Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx); 1597 else 1598 Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx); 1599 1600 StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr); 1601 1602 if (Op.getValueType().isVector()) 1603 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr,MachinePointerInfo(), 1604 false, false, 0); 1605 return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr, 1606 MachinePointerInfo(), 1607 Vec.getValueType().getVectorElementType(), 1608 false, false, 0); 1609} 1610 1611SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) { 1612 assert(Op.getValueType().isVector() && "Non-vector insert subvector!"); 1613 1614 SDValue Vec = Op.getOperand(0); 1615 SDValue Part = Op.getOperand(1); 1616 SDValue Idx = Op.getOperand(2); 1617 DebugLoc dl = Op.getDebugLoc(); 1618 1619 // Store the value to a temporary stack slot, then LOAD the returned part. 1620 1621 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); 1622 int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 1623 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); 1624 1625 // First store the whole vector. 1626 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo, 1627 false, false, 0); 1628 1629 // Then store the inserted part. 1630 1631 // Add the offset to the index. 1632 unsigned EltSize = 1633 Vec.getValueType().getVectorElementType().getSizeInBits()/8; 1634 1635 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, 1636 DAG.getConstant(EltSize, Idx.getValueType())); 1637 1638 if (Idx.getValueType().bitsGT(TLI.getPointerTy())) 1639 Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx); 1640 else 1641 Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx); 1642 1643 SDValue SubStackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, 1644 StackPtr); 1645 1646 // Store the subvector. 1647 Ch = DAG.getStore(DAG.getEntryNode(), dl, Part, SubStackPtr, 1648 MachinePointerInfo(), false, false, 0); 1649 1650 // Finally, load the updated vector. 1651 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo, 1652 false, false, 0); 1653} 1654 1655SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) { 1656 // We can't handle this case efficiently. Allocate a sufficiently 1657 // aligned object on the stack, store each element into it, then load 1658 // the result as a vector. 1659 // Create the stack frame object. 1660 EVT VT = Node->getValueType(0); 1661 EVT EltVT = VT.getVectorElementType(); 1662 DebugLoc dl = Node->getDebugLoc(); 1663 SDValue FIPtr = DAG.CreateStackTemporary(VT); 1664 int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex(); 1665 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(FI); 1666 1667 // Emit a store of each element to the stack slot. 1668 SmallVector<SDValue, 8> Stores; 1669 unsigned TypeByteSize = EltVT.getSizeInBits() / 8; 1670 // Store (in the right endianness) the elements to memory. 
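// Element i lands at byte offset i * TypeByteSize from the slot base; for a
// v4i32 build_vector that is offsets 0, 4, 8 and 12. Undef operands are
// skipped below, leaving their bytes unwritten, which is fine since those
// lanes of the result are undefined anyway.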
1671 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1672 // Ignore undef elements. 1673 if (Node->getOperand(i).getOpcode() == ISD::UNDEF) continue; 1674 1675 unsigned Offset = TypeByteSize*i; 1676 1677 SDValue Idx = DAG.getConstant(Offset, FIPtr.getValueType()); 1678 Idx = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, Idx); 1679 1680 // If the destination vector element type is narrower than the source 1681 // element type, only store the bits necessary. 1682 if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) { 1683 Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl, 1684 Node->getOperand(i), Idx, 1685 PtrInfo.getWithOffset(Offset), 1686 EltVT, false, false, 0)); 1687 } else 1688 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, 1689 Node->getOperand(i), Idx, 1690 PtrInfo.getWithOffset(Offset), 1691 false, false, 0)); 1692 } 1693 1694 SDValue StoreChain; 1695 if (!Stores.empty()) // Not all undef elements? 1696 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1697 &Stores[0], Stores.size()); 1698 else 1699 StoreChain = DAG.getEntryNode(); 1700 1701 // Result is a load from the stack slot. 1702 return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo, false, false, 0); 1703} 1704 1705SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) { 1706 DebugLoc dl = Node->getDebugLoc(); 1707 SDValue Tmp1 = Node->getOperand(0); 1708 SDValue Tmp2 = Node->getOperand(1); 1709 1710 // Get the sign bit of the RHS. First obtain a value that has the same 1711 // sign as the sign bit, i.e. negative if and only if the sign bit is 1. 1712 SDValue SignBit; 1713 EVT FloatVT = Tmp2.getValueType(); 1714 EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits()); 1715 if (isTypeLegal(IVT)) { 1716 // Convert to an integer with the same sign bit. 1717 SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2); 1718 } else { 1719 // Store the float to memory, then load the sign part out as an integer. 1720 MVT LoadTy = TLI.getPointerTy(); 1721 // First create a temporary that is aligned for both the load and store. 1722 SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy); 1723 // Then store the float to it. 1724 SDValue Ch = 1725 DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, MachinePointerInfo(), 1726 false, false, 0); 1727 if (TLI.isBigEndian()) { 1728 assert(FloatVT.isByteSized() && "Unsupported floating point type!"); 1729 // Load out a legal integer with the same sign bit as the float. 1730 SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(), 1731 false, false, 0); 1732 } else { // Little endian 1733 SDValue LoadPtr = StackPtr; 1734 // The float may be wider than the integer we are going to load. Advance 1735 // the pointer so that the loaded integer will contain the sign bit. 1736 unsigned Strides = (FloatVT.getSizeInBits()-1)/LoadTy.getSizeInBits(); 1737 unsigned ByteOffset = (Strides * LoadTy.getSizeInBits()) / 8; 1738 LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(), 1739 LoadPtr, DAG.getIntPtrConstant(ByteOffset)); 1740 // Load a legal integer containing the sign bit. 1741 SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, MachinePointerInfo(), 1742 false, false, 0); 1743 // Move the sign bit to the top bit of the loaded integer. 
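// For example, with an f64 value and a 32-bit pointer type: Strides = 1 and
// ByteOffset = 4, so the load above reads bits [32,63] of the double and the
// sign bit is already the MSB of the loaded i32 (BitShift below is 0). For a
// wider type such as the 80-bit x86 long double, the SHL is what moves the
// sign bit up to the top.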
1744 unsigned BitShift = LoadTy.getSizeInBits() - 1745 (FloatVT.getSizeInBits() - 8 * ByteOffset); 1746 assert(BitShift < LoadTy.getSizeInBits() && "Pointer advanced wrong?"); 1747 if (BitShift) 1748 SignBit = DAG.getNode(ISD::SHL, dl, LoadTy, SignBit, 1749 DAG.getConstant(BitShift, 1750 TLI.getShiftAmountTy(SignBit.getValueType()))); 1751 } 1752 } 1753 // Now get the sign bit proper, by seeing whether the value is negative. 1754 SignBit = DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()), 1755 SignBit, DAG.getConstant(0, SignBit.getValueType()), 1756 ISD::SETLT); 1757 // Get the absolute value of the result. 1758 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1); 1759 // Select between the nabs and abs value based on the sign bit of 1760 // the input. 1761 return DAG.getNode(ISD::SELECT, dl, AbsVal.getValueType(), SignBit, 1762 DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal), 1763 AbsVal); 1764} 1765 1766void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node, 1767 SmallVectorImpl<SDValue> &Results) { 1768 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore(); 1769 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and" 1770 " not tell us which reg is the stack pointer!"); 1771 DebugLoc dl = Node->getDebugLoc(); 1772 EVT VT = Node->getValueType(0); 1773 SDValue Tmp1 = SDValue(Node, 0); 1774 SDValue Tmp2 = SDValue(Node, 1); 1775 SDValue Tmp3 = Node->getOperand(2); 1776 SDValue Chain = Tmp1.getOperand(0); 1777 1778 // Chain the dynamic stack allocation so that it doesn't modify the stack 1779 // pointer when other instructions are using the stack. 1780 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true)); 1781 1782 SDValue Size = Tmp2.getOperand(1); 1783 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); 1784 Chain = SP.getValue(1); 1785 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue(); 1786 unsigned StackAlign = TM.getFrameLowering()->getStackAlignment(); 1787 if (Align > StackAlign) 1788 SP = DAG.getNode(ISD::AND, dl, VT, SP, 1789 DAG.getConstant(-(uint64_t)Align, VT)); 1790 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value 1791 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain 1792 1793 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true), 1794 DAG.getIntPtrConstant(0, true), SDValue()); 1795 1796 Results.push_back(Tmp1); 1797 Results.push_back(Tmp2); 1798} 1799 1800/// LegalizeSetCCCondCode - Legalize a SETCC with given LHS and RHS and 1801/// condition code CC on the current target. This routine expands SETCC with 1802/// illegal condition code into AND / OR of multiple SETCC values. 1803void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, 1804 SDValue &LHS, SDValue &RHS, 1805 SDValue &CC, 1806 DebugLoc dl) { 1807 EVT OpVT = LHS.getValueType(); 1808 ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get(); 1809 switch (TLI.getCondCodeAction(CCCode, OpVT)) { 1810 default: assert(0 && "Unknown condition code action!"); 1811 case TargetLowering::Legal: 1812 // Nothing to do. 
1813 break;
1814 case TargetLowering::Expand: {
1815 ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID;
1816 unsigned Opc = 0;
1817 switch (CCCode) {
1818 default: assert(0 && "Don't know how to expand this condition!");
1819 case ISD::SETOEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETO; Opc = ISD::AND; break;
1820 case ISD::SETOGT: CC1 = ISD::SETGT; CC2 = ISD::SETO; Opc = ISD::AND; break;
1821 case ISD::SETOGE: CC1 = ISD::SETGE; CC2 = ISD::SETO; Opc = ISD::AND; break;
1822 case ISD::SETOLT: CC1 = ISD::SETLT; CC2 = ISD::SETO; Opc = ISD::AND; break;
1823 case ISD::SETOLE: CC1 = ISD::SETLE; CC2 = ISD::SETO; Opc = ISD::AND; break;
1824 case ISD::SETONE: CC1 = ISD::SETNE; CC2 = ISD::SETO; Opc = ISD::AND; break;
1825 case ISD::SETUEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETUO; Opc = ISD::OR; break;
1826 case ISD::SETUGT: CC1 = ISD::SETGT; CC2 = ISD::SETUO; Opc = ISD::OR; break;
1827 case ISD::SETUGE: CC1 = ISD::SETGE; CC2 = ISD::SETUO; Opc = ISD::OR; break;
1828 case ISD::SETULT: CC1 = ISD::SETLT; CC2 = ISD::SETUO; Opc = ISD::OR; break;
1829 case ISD::SETULE: CC1 = ISD::SETLE; CC2 = ISD::SETUO; Opc = ISD::OR; break;
1830 case ISD::SETUNE: CC1 = ISD::SETNE; CC2 = ISD::SETUO; Opc = ISD::OR; break;
1831 // FIXME: Implement more expansions.
1832 }
1833 
1834 SDValue SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1);
1835 SDValue SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2);
1836 LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2);
1837 RHS = SDValue();
1838 CC = SDValue();
1839 break;
1840 }
1841 }
1842}
1843 
1844/// EmitStackConvert - Emit a store/load combination to the stack. This stores
1845/// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does
1846/// a load from the stack slot to DestVT, extending it if needed.
1847/// The resultant code need not be legal.
1848SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
1849 EVT SlotVT,
1850 EVT DestVT,
1851 DebugLoc dl) {
1852 // Create the stack frame object.
1853 unsigned SrcAlign =
1854 TLI.getTargetData()->getPrefTypeAlignment(SrcOp.getValueType().
1855 getTypeForEVT(*DAG.getContext()));
1856 SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);
1857 
1858 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
1859 int SPFI = StackPtrFI->getIndex();
1860 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SPFI);
1861 
1862 unsigned SrcSize = SrcOp.getValueType().getSizeInBits();
1863 unsigned SlotSize = SlotVT.getSizeInBits();
1864 unsigned DestSize = DestVT.getSizeInBits();
1865 const Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
1866 unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(DestType);
1867 
1868 // Emit a store to the stack slot. Use a truncstore if the input value is
1869 // bigger than SlotVT.
1870 SDValue Store;
1871 
1872 if (SrcSize > SlotSize)
1873 Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
1874 PtrInfo, SlotVT, false, false, SrcAlign);
1875 else {
1876 assert(SrcSize == SlotSize && "Invalid store");
1877 Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
1878 PtrInfo, false, false, SrcAlign);
1879 }
1880 
1881 // Result is a load from the stack slot.
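// Three size relationships can occur: SrcSize > SlotSize was handled above
// with a truncstore; below, a plain load is used when the slot and the
// destination have the same width, and an extending load when the
// destination is wider. SlotSize > DestSize is not expected and would hit
// the assert.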
1882 if (SlotSize == DestSize) 1883 return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo, 1884 false, false, DestAlign); 1885 1886 assert(SlotSize < DestSize && "Unknown extension!"); 1887 return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, 1888 PtrInfo, SlotVT, false, false, DestAlign); 1889} 1890 1891SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) { 1892 DebugLoc dl = Node->getDebugLoc(); 1893 // Create a vector sized/aligned stack slot, store the value to element #0, 1894 // then load the whole vector back out. 1895 SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0)); 1896 1897 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr); 1898 int SPFI = StackPtrFI->getIndex(); 1899 1900 SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0), 1901 StackPtr, 1902 MachinePointerInfo::getFixedStack(SPFI), 1903 Node->getValueType(0).getVectorElementType(), 1904 false, false, 0); 1905 return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr, 1906 MachinePointerInfo::getFixedStack(SPFI), 1907 false, false, 0); 1908} 1909 1910 1911/// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't 1912/// support the operation, but do support the resultant vector type. 1913SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { 1914 unsigned NumElems = Node->getNumOperands(); 1915 SDValue Value1, Value2; 1916 DebugLoc dl = Node->getDebugLoc(); 1917 EVT VT = Node->getValueType(0); 1918 EVT OpVT = Node->getOperand(0).getValueType(); 1919 EVT EltVT = VT.getVectorElementType(); 1920 1921 // If the only non-undef value is the low element, turn this into a 1922 // SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X. 1923 bool isOnlyLowElement = true; 1924 bool MoreThanTwoValues = false; 1925 bool isConstant = true; 1926 for (unsigned i = 0; i < NumElems; ++i) { 1927 SDValue V = Node->getOperand(i); 1928 if (V.getOpcode() == ISD::UNDEF) 1929 continue; 1930 if (i > 0) 1931 isOnlyLowElement = false; 1932 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 1933 isConstant = false; 1934 1935 if (!Value1.getNode()) { 1936 Value1 = V; 1937 } else if (!Value2.getNode()) { 1938 if (V != Value1) 1939 Value2 = V; 1940 } else if (V != Value1 && V != Value2) { 1941 MoreThanTwoValues = true; 1942 } 1943 } 1944 1945 if (!Value1.getNode()) 1946 return DAG.getUNDEF(VT); 1947 1948 if (isOnlyLowElement) 1949 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0)); 1950 1951 // If all elements are constants, create a load from the constant pool. 1952 if (isConstant) { 1953 std::vector<Constant*> CV; 1954 for (unsigned i = 0, e = NumElems; i != e; ++i) { 1955 if (ConstantFPSDNode *V = 1956 dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) { 1957 CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue())); 1958 } else if (ConstantSDNode *V = 1959 dyn_cast<ConstantSDNode>(Node->getOperand(i))) { 1960 if (OpVT==EltVT) 1961 CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue())); 1962 else { 1963 // If OpVT and EltVT don't match, EltVT is not legal and the 1964 // element values have been promoted/truncated earlier. Undo this; 1965 // we don't want a v16i8 to become a v16i32 for example. 
1966 const ConstantInt *CI = V->getConstantIntValue(); 1967 CV.push_back(ConstantInt::get(EltVT.getTypeForEVT(*DAG.getContext()), 1968 CI->getZExtValue())); 1969 } 1970 } else { 1971 assert(Node->getOperand(i).getOpcode() == ISD::UNDEF); 1972 const Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext()); 1973 CV.push_back(UndefValue::get(OpNTy)); 1974 } 1975 } 1976 Constant *CP = ConstantVector::get(CV); 1977 SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy()); 1978 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 1979 return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 1980 MachinePointerInfo::getConstantPool(), 1981 false, false, Alignment); 1982 } 1983 1984 if (!MoreThanTwoValues) { 1985 SmallVector<int, 8> ShuffleVec(NumElems, -1); 1986 for (unsigned i = 0; i < NumElems; ++i) { 1987 SDValue V = Node->getOperand(i); 1988 if (V.getOpcode() == ISD::UNDEF) 1989 continue; 1990 ShuffleVec[i] = V == Value1 ? 0 : NumElems; 1991 } 1992 if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) { 1993 // Get the splatted value into the low element of a vector register. 1994 SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1); 1995 SDValue Vec2; 1996 if (Value2.getNode()) 1997 Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2); 1998 else 1999 Vec2 = DAG.getUNDEF(VT); 2000 2001 // Return shuffle(LowValVec, undef, <0,0,0,0>) 2002 return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data()); 2003 } 2004 } 2005 2006 // Otherwise, we can't handle this case efficiently. 2007 return ExpandVectorBuildThroughStack(Node); 2008} 2009 2010// ExpandLibCall - Expand a node into a call to a libcall. If the result value 2011// does not fit into a register, return the lo part and set the hi part to the 2012// by-reg argument. If it does fit into a single register, return the result 2013// and leave the Hi part unset. 2014SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, 2015 bool isSigned) { 2016 // The input chain to this libcall is the entry node of the function. 2017 // Legalizing the call will automatically add the previous call to the 2018 // dependence. 2019 SDValue InChain = DAG.getEntryNode(); 2020 2021 TargetLowering::ArgListTy Args; 2022 TargetLowering::ArgListEntry Entry; 2023 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 2024 EVT ArgVT = Node->getOperand(i).getValueType(); 2025 const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2026 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 2027 Entry.isSExt = isSigned; 2028 Entry.isZExt = !isSigned; 2029 Args.push_back(Entry); 2030 } 2031 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2032 TLI.getPointerTy()); 2033 2034 // Splice the libcall in wherever FindInputOutputChains tells us to. 2035 const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 2036 2037 // isTailCall may be true since the callee does not reference caller stack 2038 // frame. Check if it's in the right position. 2039 bool isTailCall = isInTailCallPosition(DAG, Node, TLI); 2040 std::pair<SDValue, SDValue> CallInfo = 2041 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 2042 0, TLI.getLibcallCallingConv(LC), isTailCall, 2043 /*isReturnValueUsed=*/true, 2044 Callee, Args, DAG, Node->getDebugLoc()); 2045 2046 if (!CallInfo.second.getNode()) 2047 // It's a tailcall, return the chain (which is the DAG root). 2048 return DAG.getRoot(); 2049 2050 // Legalize the call sequence, starting with the chain. 
This will advance 2051 // the LastCALLSEQ to the legalized version of the CALLSEQ_END node that 2052 // was added by LowerCallTo (guaranteeing proper serialization of calls). 2053 LegalizeOp(CallInfo.second); 2054 return CallInfo.first; 2055} 2056 2057/// ExpandLibCall - Generate a libcall taking the given operands as arguments 2058/// and returning a result of type RetVT. 2059SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, 2060 const SDValue *Ops, unsigned NumOps, 2061 bool isSigned, DebugLoc dl) { 2062 TargetLowering::ArgListTy Args; 2063 Args.reserve(NumOps); 2064 2065 TargetLowering::ArgListEntry Entry; 2066 for (unsigned i = 0; i != NumOps; ++i) { 2067 Entry.Node = Ops[i]; 2068 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext()); 2069 Entry.isSExt = isSigned; 2070 Entry.isZExt = !isSigned; 2071 Args.push_back(Entry); 2072 } 2073 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2074 TLI.getPointerTy()); 2075 2076 const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 2077 std::pair<SDValue,SDValue> CallInfo = 2078 TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false, 2079 false, 0, TLI.getLibcallCallingConv(LC), false, 2080 /*isReturnValueUsed=*/true, 2081 Callee, Args, DAG, dl); 2082 2083 // Legalize the call sequence, starting with the chain. This will advance 2084 // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that 2085 // was added by LowerCallTo (guaranteeing proper serialization of calls). 2086 LegalizeOp(CallInfo.second); 2087 2088 return CallInfo.first; 2089} 2090 2091// ExpandChainLibCall - Expand a node into a call to a libcall. Similar to 2092// ExpandLibCall except that the first operand is the in-chain. 2093std::pair<SDValue, SDValue> 2094SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC, 2095 SDNode *Node, 2096 bool isSigned) { 2097 SDValue InChain = Node->getOperand(0); 2098 2099 TargetLowering::ArgListTy Args; 2100 TargetLowering::ArgListEntry Entry; 2101 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) { 2102 EVT ArgVT = Node->getOperand(i).getValueType(); 2103 const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2104 Entry.Node = Node->getOperand(i); 2105 Entry.Ty = ArgTy; 2106 Entry.isSExt = isSigned; 2107 Entry.isZExt = !isSigned; 2108 Args.push_back(Entry); 2109 } 2110 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2111 TLI.getPointerTy()); 2112 2113 // Splice the libcall in wherever FindInputOutputChains tells us to. 2114 const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 2115 std::pair<SDValue, SDValue> CallInfo = 2116 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 2117 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false, 2118 /*isReturnValueUsed=*/true, 2119 Callee, Args, DAG, Node->getDebugLoc()); 2120 2121 // Legalize the call sequence, starting with the chain. This will advance 2122 // the LastCALLSEQ to the legalized version of the CALLSEQ_END node that 2123 // was added by LowerCallTo (guaranteeing proper serialization of calls). 
2124 LegalizeOp(CallInfo.second); 2125 return CallInfo; 2126} 2127 2128SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node, 2129 RTLIB::Libcall Call_F32, 2130 RTLIB::Libcall Call_F64, 2131 RTLIB::Libcall Call_F80, 2132 RTLIB::Libcall Call_PPCF128) { 2133 RTLIB::Libcall LC; 2134 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 2135 default: assert(0 && "Unexpected request for libcall!"); 2136 case MVT::f32: LC = Call_F32; break; 2137 case MVT::f64: LC = Call_F64; break; 2138 case MVT::f80: LC = Call_F80; break; 2139 case MVT::ppcf128: LC = Call_PPCF128; break; 2140 } 2141 return ExpandLibCall(LC, Node, false); 2142} 2143 2144SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned, 2145 RTLIB::Libcall Call_I8, 2146 RTLIB::Libcall Call_I16, 2147 RTLIB::Libcall Call_I32, 2148 RTLIB::Libcall Call_I64, 2149 RTLIB::Libcall Call_I128) { 2150 RTLIB::Libcall LC; 2151 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 2152 default: assert(0 && "Unexpected request for libcall!"); 2153 case MVT::i8: LC = Call_I8; break; 2154 case MVT::i16: LC = Call_I16; break; 2155 case MVT::i32: LC = Call_I32; break; 2156 case MVT::i64: LC = Call_I64; break; 2157 case MVT::i128: LC = Call_I128; break; 2158 } 2159 return ExpandLibCall(LC, Node, isSigned); 2160} 2161 2162/// isDivRemLibcallAvailable - Return true if divmod libcall is available. 2163static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned, 2164 const TargetLowering &TLI) { 2165 RTLIB::Libcall LC; 2166 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 2167 default: assert(0 && "Unexpected request for libcall!"); 2168 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 2169 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 2170 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 2171 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 2172 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 2173 } 2174 2175 return TLI.getLibcallName(LC) != 0; 2176} 2177 2178/// UseDivRem - Only issue divrem libcall if both quotient and remainder are 2179/// needed. 2180static bool UseDivRem(SDNode *Node, bool isSigned, bool isDIV) { 2181 unsigned OtherOpcode = 0; 2182 if (isSigned) 2183 OtherOpcode = isDIV ? ISD::SREM : ISD::SDIV; 2184 else 2185 OtherOpcode = isDIV ? ISD::UREM : ISD::UDIV; 2186 2187 SDValue Op0 = Node->getOperand(0); 2188 SDValue Op1 = Node->getOperand(1); 2189 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(), 2190 UE = Op0.getNode()->use_end(); UI != UE; ++UI) { 2191 SDNode *User = *UI; 2192 if (User == Node) 2193 continue; 2194 if (User->getOpcode() == OtherOpcode && 2195 User->getOperand(0) == Op0 && 2196 User->getOperand(1) == Op1) 2197 return true; 2198 } 2199 return false; 2200} 2201 2202/// ExpandDivRemLibCall - Issue libcalls to __{u}divmod to compute div / rem 2203/// pairs. 2204void 2205SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node, 2206 SmallVectorImpl<SDValue> &Results) { 2207 unsigned Opcode = Node->getOpcode(); 2208 bool isSigned = Opcode == ISD::SDIVREM; 2209 2210 RTLIB::Libcall LC; 2211 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 2212 default: assert(0 && "Unexpected request for libcall!"); 2213 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 2214 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 2215 case MVT::i32: LC= isSigned ? 
RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 2216 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 2217 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 2218 } 2219 2220 // The input chain to this libcall is the entry node of the function. 2221 // Legalizing the call will automatically add the previous call to the 2222 // dependence. 2223 SDValue InChain = DAG.getEntryNode(); 2224 2225 EVT RetVT = Node->getValueType(0); 2226 const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext()); 2227 2228 TargetLowering::ArgListTy Args; 2229 TargetLowering::ArgListEntry Entry; 2230 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 2231 EVT ArgVT = Node->getOperand(i).getValueType(); 2232 const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2233 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 2234 Entry.isSExt = isSigned; 2235 Entry.isZExt = !isSigned; 2236 Args.push_back(Entry); 2237 } 2238 2239 // Also pass the return address of the remainder. 2240 SDValue FIPtr = DAG.CreateStackTemporary(RetVT); 2241 Entry.Node = FIPtr; 2242 Entry.Ty = RetTy->getPointerTo(); 2243 Entry.isSExt = isSigned; 2244 Entry.isZExt = !isSigned; 2245 Args.push_back(Entry); 2246 2247 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 2248 TLI.getPointerTy()); 2249 2250 // Splice the libcall in wherever FindInputOutputChains tells us to. 2251 DebugLoc dl = Node->getDebugLoc(); 2252 std::pair<SDValue, SDValue> CallInfo = 2253 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 2254 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false, 2255 /*isReturnValueUsed=*/true, Callee, Args, DAG, dl); 2256 2257 // Legalize the call sequence, starting with the chain. This will advance 2258 // the LastCALLSEQ to the legalized version of the CALLSEQ_END node that 2259 // was added by LowerCallTo (guaranteeing proper serialization of calls). 2260 LegalizeOp(CallInfo.second); 2261 2262 // Remainder is loaded back from the stack frame. 2263 SDValue Rem = DAG.getLoad(RetVT, dl, getLastCALLSEQ(), FIPtr, 2264 MachinePointerInfo(), false, false, 0); 2265 Results.push_back(CallInfo.first); 2266 Results.push_back(Rem); 2267} 2268 2269/// ExpandLegalINT_TO_FP - This function is responsible for legalizing a 2270/// INT_TO_FP operation of the specified operand when the target requests that 2271/// we expand it. At this point, we know that the result and operand types are 2272/// legal for the target. 2273SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, 2274 SDValue Op0, 2275 EVT DestVT, 2276 DebugLoc dl) { 2277 if (Op0.getValueType() == MVT::i32) { 2278 // simple 32-bit [signed|unsigned] integer to float/double expansion 2279 2280 // Get the stack frame index of a 8 byte buffer. 
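// The idea of the i32 expansion below: the (possibly sign-adjusted) input is
// written into one word of an 8-byte slot and 0x43300000 into the other, so
// that the slot, read back as an f64, holds exactly 2^52 + x (the signed
// case first XORs the input with 0x80000000 so it acts as an unsigned
// offset). Subtracting the matching bias constant then yields the converted
// value exactly, since a 32-bit payload fits well inside f64's 52-bit
// mantissa.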
2281 SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64); 2282 2283 // word offset constant for Hi/Lo address computation 2284 SDValue WordOff = DAG.getConstant(sizeof(int), TLI.getPointerTy()); 2285 // set up Hi and Lo (into buffer) address based on endian 2286 SDValue Hi = StackSlot; 2287 SDValue Lo = DAG.getNode(ISD::ADD, dl, 2288 TLI.getPointerTy(), StackSlot, WordOff); 2289 if (TLI.isLittleEndian()) 2290 std::swap(Hi, Lo); 2291 2292 // if signed map to unsigned space 2293 SDValue Op0Mapped; 2294 if (isSigned) { 2295 // constant used to invert sign bit (signed to unsigned mapping) 2296 SDValue SignBit = DAG.getConstant(0x80000000u, MVT::i32); 2297 Op0Mapped = DAG.getNode(ISD::XOR, dl, MVT::i32, Op0, SignBit); 2298 } else { 2299 Op0Mapped = Op0; 2300 } 2301 // store the lo of the constructed double - based on integer input 2302 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, 2303 Op0Mapped, Lo, MachinePointerInfo(), 2304 false, false, 0); 2305 // initial hi portion of constructed double 2306 SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32); 2307 // store the hi of the constructed double - biased exponent 2308 SDValue Store2 = DAG.getStore(Store1, dl, InitialHi, Hi, 2309 MachinePointerInfo(), 2310 false, false, 0); 2311 // load the constructed double 2312 SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot, 2313 MachinePointerInfo(), false, false, 0); 2314 // FP constant to bias correct the final result 2315 SDValue Bias = DAG.getConstantFP(isSigned ? 2316 BitsToDouble(0x4330000080000000ULL) : 2317 BitsToDouble(0x4330000000000000ULL), 2318 MVT::f64); 2319 // subtract the bias 2320 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias); 2321 // final result 2322 SDValue Result; 2323 // handle final rounding 2324 if (DestVT == MVT::f64) { 2325 // do nothing 2326 Result = Sub; 2327 } else if (DestVT.bitsLT(MVT::f64)) { 2328 Result = DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 2329 DAG.getIntPtrConstant(0)); 2330 } else if (DestVT.bitsGT(MVT::f64)) { 2331 Result = DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 2332 } 2333 return Result; 2334 } 2335 assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet"); 2336 // Code below here assumes !isSigned without checking again. 2337 2338 // Implementation of unsigned i64 to f64 following the algorithm in 2339 // __floatundidf in compiler_rt. This implementation has the advantage 2340 // of performing rounding correctly, both in the default rounding mode 2341 // and in all alternate rounding modes. 2342 // TODO: Generalize this for use with other types. 
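// Sketch of the i64 -> f64 path below: the low and high 32-bit halves are
// each ORed into the mantissa of a fixed-exponent pattern (2^52 for the low
// half, 2^84 for the high half) and bitcast to f64. 0x4530000000100000 is
// the double 2^84 + 2^52, so subtracting it from the high part cancels both
// implicit offsets; every step is exact except the final FADD, which is the
// single correctly rounded operation producing hi * 2^32 + lo.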
2343 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f64) { 2344 SDValue TwoP52 = 2345 DAG.getConstant(UINT64_C(0x4330000000000000), MVT::i64); 2346 SDValue TwoP84PlusTwoP52 = 2347 DAG.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), MVT::f64); 2348 SDValue TwoP84 = 2349 DAG.getConstant(UINT64_C(0x4530000000000000), MVT::i64); 2350 2351 SDValue Lo = DAG.getZeroExtendInReg(Op0, dl, MVT::i32); 2352 SDValue Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, 2353 DAG.getConstant(32, MVT::i64)); 2354 SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52); 2355 SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84); 2356 SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr); 2357 SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr); 2358 SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt, 2359 TwoP84PlusTwoP52); 2360 return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub); 2361 } 2362 2363 // Implementation of unsigned i64 to f32. 2364 // TODO: Generalize this for use with other types. 2365 if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) { 2366 // For unsigned conversions, convert them to signed conversions using the 2367 // algorithm from the x86_64 __floatundidf in compiler_rt. 2368 if (!isSigned) { 2369 SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0); 2370 2371 SDValue ShiftConst = 2372 DAG.getConstant(1, TLI.getShiftAmountTy(Op0.getValueType())); 2373 SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst); 2374 SDValue AndConst = DAG.getConstant(1, MVT::i64); 2375 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst); 2376 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr); 2377 2378 SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or); 2379 SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt); 2380 2381 // TODO: This really should be implemented using a branch rather than a 2382 // select. We happen to get lucky and machinesink does the right 2383 // thing most of the time. This would be a good candidate for a 2384 //pseudo-op, or, even better, for whole-function isel. 2385 SDValue SignBitTest = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2386 Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT); 2387 return DAG.getNode(ISD::SELECT, dl, MVT::f32, SignBitTest, Slow, Fast); 2388 } 2389 2390 // Otherwise, implement the fully general conversion. 
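// Rough outline of the general path below: for inputs of 2^53 or larger the
// low 11 bits are folded into a single sticky bit (bit 11), so that the
// subsequent i64 -> f64 -> f32 double rounding cannot differ from a direct
// i64 -> f32 rounding. The adjusted value is then split into 32-bit halves,
// each converted to f64 exactly, recombined as hi * 2^32 + lo, and rounded
// to f32.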
2391 2392 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2393 DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64)); 2394 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, 2395 DAG.getConstant(UINT64_C(0x800), MVT::i64)); 2396 SDValue And2 = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, 2397 DAG.getConstant(UINT64_C(0x7ff), MVT::i64)); 2398 SDValue Ne = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2399 And2, DAG.getConstant(UINT64_C(0), MVT::i64), ISD::SETNE); 2400 SDValue Sel = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ne, Or, Op0); 2401 SDValue Ge = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), 2402 Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64), 2403 ISD::SETUGE); 2404 SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0); 2405 EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType()); 2406 2407 SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2, 2408 DAG.getConstant(32, SHVT)); 2409 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh); 2410 SDValue Fcvt = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Trunc); 2411 SDValue TwoP32 = 2412 DAG.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), MVT::f64); 2413 SDValue Fmul = DAG.getNode(ISD::FMUL, dl, MVT::f64, TwoP32, Fcvt); 2414 SDValue Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sel2); 2415 SDValue Fcvt2 = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Lo); 2416 SDValue Fadd = DAG.getNode(ISD::FADD, dl, MVT::f64, Fmul, Fcvt2); 2417 return DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Fadd, 2418 DAG.getIntPtrConstant(0)); 2419 } 2420 2421 SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0); 2422 2423 SDValue SignSet = DAG.getSetCC(dl, TLI.getSetCCResultType(Op0.getValueType()), 2424 Op0, DAG.getConstant(0, Op0.getValueType()), 2425 ISD::SETLT); 2426 SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4); 2427 SDValue CstOffset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), 2428 SignSet, Four, Zero); 2429 2430 // If the sign bit of the integer is set, the large number will be treated 2431 // as a negative number. To counteract this, the dynamic code adds an 2432 // offset depending on the data type. 
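// For example, an unsigned input with its top bit set is converted by the
// SINT_TO_FP above as a negative value; adding 2^N as a float (N being the
// input width, taken from the table below) moves it back into the correct
// unsigned range. The SELECT of 0 or 4 above picks which 32-bit word of the
// 64-bit constant-pool entry is loaded: 0.0f for non-negative inputs, the
// fudge factor otherwise.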
2433 uint64_t FF; 2434 switch (Op0.getValueType().getSimpleVT().SimpleTy) { 2435 default: assert(0 && "Unsupported integer type!"); 2436 case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float) 2437 case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float) 2438 case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float) 2439 case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float) 2440 } 2441 if (TLI.isLittleEndian()) FF <<= 32; 2442 Constant *FudgeFactor = ConstantInt::get( 2443 Type::getInt64Ty(*DAG.getContext()), FF); 2444 2445 SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy()); 2446 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 2447 CPIdx = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), CPIdx, CstOffset); 2448 Alignment = std::min(Alignment, 4u); 2449 SDValue FudgeInReg; 2450 if (DestVT == MVT::f32) 2451 FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx, 2452 MachinePointerInfo::getConstantPool(), 2453 false, false, Alignment); 2454 else { 2455 FudgeInReg = 2456 LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, 2457 DAG.getEntryNode(), CPIdx, 2458 MachinePointerInfo::getConstantPool(), 2459 MVT::f32, false, false, Alignment)); 2460 } 2461 2462 return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg); 2463} 2464 2465/// PromoteLegalINT_TO_FP - This function is responsible for legalizing a 2466/// *INT_TO_FP operation of the specified operand when the target requests that 2467/// we promote it. At this point, we know that the result and operand types are 2468/// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP 2469/// operation that takes a larger input. 2470SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp, 2471 EVT DestVT, 2472 bool isSigned, 2473 DebugLoc dl) { 2474 // First step, figure out the appropriate *INT_TO_FP operation to use. 2475 EVT NewInTy = LegalOp.getValueType(); 2476 2477 unsigned OpToUse = 0; 2478 2479 // Scan for the appropriate larger type to use. 2480 while (1) { 2481 NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1); 2482 assert(NewInTy.isInteger() && "Ran out of possibilities!"); 2483 2484 // If the target supports SINT_TO_FP of this type, use it. 2485 if (TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, NewInTy)) { 2486 OpToUse = ISD::SINT_TO_FP; 2487 break; 2488 } 2489 if (isSigned) continue; 2490 2491 // If the target supports UINT_TO_FP of this type, use it. 2492 if (TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, NewInTy)) { 2493 OpToUse = ISD::UINT_TO_FP; 2494 break; 2495 } 2496 2497 // Otherwise, try a larger type. 2498 } 2499 2500 // Okay, we found the operation and type to use. Zero extend our input to the 2501 // desired type then run the operation on it. 2502 return DAG.getNode(OpToUse, dl, DestVT, 2503 DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, 2504 dl, NewInTy, LegalOp)); 2505} 2506 2507/// PromoteLegalFP_TO_INT - This function is responsible for legalizing a 2508/// FP_TO_*INT operation of the specified operand when the target requests that 2509/// we promote it. At this point, we know that the result and operand types are 2510/// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT 2511/// operation that returns a larger result. 2512SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp, 2513 EVT DestVT, 2514 bool isSigned, 2515 DebugLoc dl) { 2516 // First step, figure out the appropriate FP_TO*INT operation to use. 
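// For instance, if FP_TO_UINT:i16 is not legal but FP_TO_SINT:i32 is, the
// scan below settles on FP_TO_SINT producing i32, and the extra high bits
// are discarded by the TRUNCATE emitted at the end.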
2517 EVT NewOutTy = DestVT; 2518 2519 unsigned OpToUse = 0; 2520 2521 // Scan for the appropriate larger type to use. 2522 while (1) { 2523 NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1); 2524 assert(NewOutTy.isInteger() && "Ran out of possibilities!"); 2525 2526 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) { 2527 OpToUse = ISD::FP_TO_SINT; 2528 break; 2529 } 2530 2531 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) { 2532 OpToUse = ISD::FP_TO_UINT; 2533 break; 2534 } 2535 2536 // Otherwise, try a larger type. 2537 } 2538 2539 2540 // Okay, we found the operation and type to use. 2541 SDValue Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp); 2542 2543 // Truncate the result of the extended FP_TO_*INT operation to the desired 2544 // size. 2545 return DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation); 2546} 2547 2548/// ExpandBSWAP - Open code the operations for BSWAP of the specified operation. 2549/// 2550SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) { 2551 EVT VT = Op.getValueType(); 2552 EVT SHVT = TLI.getShiftAmountTy(VT); 2553 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; 2554 switch (VT.getSimpleVT().SimpleTy) { 2555 default: assert(0 && "Unhandled Expand type in BSWAP!"); 2556 case MVT::i16: 2557 Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2558 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2559 return DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2560 case MVT::i32: 2561 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2562 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2563 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2564 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2565 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(0xFF0000, VT)); 2566 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, VT)); 2567 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2568 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2569 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 2570 case MVT::i64: 2571 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2572 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2573 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2574 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2575 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2576 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2577 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2578 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2579 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, DAG.getConstant(255ULL<<48, VT)); 2580 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, DAG.getConstant(255ULL<<40, VT)); 2581 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, DAG.getConstant(255ULL<<32, VT)); 2582 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, DAG.getConstant(255ULL<<24, VT)); 2583 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(255ULL<<16, VT)); 2584 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(255ULL<<8 , VT)); 2585 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7); 2586 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5); 2587 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2588 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2589 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6); 2590 Tmp4 = DAG.getNode(ISD::OR, dl, VT, 
Tmp4, Tmp2); 2591 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4); 2592 } 2593} 2594 2595/// SplatByte - Distribute ByteVal over NumBits bits. 2596// FIXME: Move this helper to a common place. 2597static APInt SplatByte(unsigned NumBits, uint8_t ByteVal) { 2598 APInt Val = APInt(NumBits, ByteVal); 2599 unsigned Shift = 8; 2600 for (unsigned i = NumBits; i > 8; i >>= 1) { 2601 Val = (Val << Shift) | Val; 2602 Shift <<= 1; 2603 } 2604 return Val; 2605} 2606 2607/// ExpandBitCount - Expand the specified bitcount instruction into operations. 2608/// 2609SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op, 2610 DebugLoc dl) { 2611 switch (Opc) { 2612 default: assert(0 && "Cannot expand this yet!"); 2613 case ISD::CTPOP: { 2614 EVT VT = Op.getValueType(); 2615 EVT ShVT = TLI.getShiftAmountTy(VT); 2616 unsigned Len = VT.getSizeInBits(); 2617 2618 assert(VT.isInteger() && Len <= 128 && Len % 8 == 0 && 2619 "CTPOP not implemented for this type."); 2620 2621 // This is the "best" algorithm from 2622 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 2623 2624 SDValue Mask55 = DAG.getConstant(SplatByte(Len, 0x55), VT); 2625 SDValue Mask33 = DAG.getConstant(SplatByte(Len, 0x33), VT); 2626 SDValue Mask0F = DAG.getConstant(SplatByte(Len, 0x0F), VT); 2627 SDValue Mask01 = DAG.getConstant(SplatByte(Len, 0x01), VT); 2628 2629 // v = v - ((v >> 1) & 0x55555555...) 2630 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 2631 DAG.getNode(ISD::AND, dl, VT, 2632 DAG.getNode(ISD::SRL, dl, VT, Op, 2633 DAG.getConstant(1, ShVT)), 2634 Mask55)); 2635 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 2636 Op = DAG.getNode(ISD::ADD, dl, VT, 2637 DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 2638 DAG.getNode(ISD::AND, dl, VT, 2639 DAG.getNode(ISD::SRL, dl, VT, Op, 2640 DAG.getConstant(2, ShVT)), 2641 Mask33)); 2642 // v = (v + (v >> 4)) & 0x0F0F0F0F... 2643 Op = DAG.getNode(ISD::AND, dl, VT, 2644 DAG.getNode(ISD::ADD, dl, VT, Op, 2645 DAG.getNode(ISD::SRL, dl, VT, Op, 2646 DAG.getConstant(4, ShVT))), 2647 Mask0F); 2648 // v = (v * 0x01010101...) >> (Len - 8) 2649 Op = DAG.getNode(ISD::SRL, dl, VT, 2650 DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 2651 DAG.getConstant(Len - 8, ShVT)); 2652 2653 return Op; 2654 } 2655 case ISD::CTLZ: { 2656 // for now, we do this: 2657 // x = x | (x >> 1); 2658 // x = x | (x >> 2); 2659 // ... 2660 // x = x | (x >>16); 2661 // x = x | (x >>32); // for 64-bit input 2662 // return popcount(~x); 2663 // 2664 // but see also: http://www.hackersdelight.org/HDcode/nlz.cc 2665 EVT VT = Op.getValueType(); 2666 EVT ShVT = TLI.getShiftAmountTy(VT); 2667 unsigned len = VT.getSizeInBits(); 2668 for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { 2669 SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT); 2670 Op = DAG.getNode(ISD::OR, dl, VT, Op, 2671 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3)); 2672 } 2673 Op = DAG.getNOT(dl, Op, VT); 2674 return DAG.getNode(ISD::CTPOP, dl, VT, Op); 2675 } 2676 case ISD::CTTZ: { 2677 // for now, we use: { return popcount(~x & (x - 1)); } 2678 // unless the target has ctlz but not ctpop, in which case we use: 2679 // { return 32 - nlz(~x & (x-1)); } 2680 // see also http://www.hackersdelight.org/HDcode/ntz.cc 2681 EVT VT = Op.getValueType(); 2682 SDValue Tmp3 = DAG.getNode(ISD::AND, dl, VT, 2683 DAG.getNOT(dl, Op, VT), 2684 DAG.getNode(ISD::SUB, dl, VT, Op, 2685 DAG.getConstant(1, VT))); 2686 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 
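// Worked example of the identity used for Tmp3: x = 0b01101000 gives
// x - 1 = 0b01100111 and ~x & (x - 1) = 0b00000111, whose population count
// (equivalently, bit width minus its leading-zero count) is 3, the number
// of trailing zeros of x.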
2687 if (!TLI.isOperationLegalOrCustom(ISD::CTPOP, VT) && 2688 TLI.isOperationLegalOrCustom(ISD::CTLZ, VT)) 2689 return DAG.getNode(ISD::SUB, dl, VT, 2690 DAG.getConstant(VT.getSizeInBits(), VT), 2691 DAG.getNode(ISD::CTLZ, dl, VT, Tmp3)); 2692 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp3); 2693 } 2694 } 2695} 2696 2697std::pair <SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) { 2698 unsigned Opc = Node->getOpcode(); 2699 MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT(); 2700 RTLIB::Libcall LC; 2701 2702 switch (Opc) { 2703 default: 2704 llvm_unreachable("Unhandled atomic intrinsic Expand!"); 2705 break; 2706 case ISD::ATOMIC_SWAP: 2707 switch (VT.SimpleTy) { 2708 default: llvm_unreachable("Unexpected value type for atomic!"); 2709 case MVT::i8: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_1; break; 2710 case MVT::i16: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_2; break; 2711 case MVT::i32: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_4; break; 2712 case MVT::i64: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_8; break; 2713 } 2714 break; 2715 case ISD::ATOMIC_CMP_SWAP: 2716 switch (VT.SimpleTy) { 2717 default: llvm_unreachable("Unexpected value type for atomic!"); 2718 case MVT::i8: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1; break; 2719 case MVT::i16: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2; break; 2720 case MVT::i32: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4; break; 2721 case MVT::i64: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8; break; 2722 } 2723 break; 2724 case ISD::ATOMIC_LOAD_ADD: 2725 switch (VT.SimpleTy) { 2726 default: llvm_unreachable("Unexpected value type for atomic!"); 2727 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_ADD_1; break; 2728 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_ADD_2; break; 2729 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_ADD_4; break; 2730 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_ADD_8; break; 2731 } 2732 break; 2733 case ISD::ATOMIC_LOAD_SUB: 2734 switch (VT.SimpleTy) { 2735 default: llvm_unreachable("Unexpected value type for atomic!"); 2736 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_SUB_1; break; 2737 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_SUB_2; break; 2738 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_SUB_4; break; 2739 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_SUB_8; break; 2740 } 2741 break; 2742 case ISD::ATOMIC_LOAD_AND: 2743 switch (VT.SimpleTy) { 2744 default: llvm_unreachable("Unexpected value type for atomic!"); 2745 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_AND_1; break; 2746 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_AND_2; break; 2747 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_AND_4; break; 2748 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_AND_8; break; 2749 } 2750 break; 2751 case ISD::ATOMIC_LOAD_OR: 2752 switch (VT.SimpleTy) { 2753 default: llvm_unreachable("Unexpected value type for atomic!"); 2754 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_OR_1; break; 2755 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_OR_2; break; 2756 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_OR_4; break; 2757 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_OR_8; break; 2758 } 2759 break; 2760 case ISD::ATOMIC_LOAD_XOR: 2761 switch (VT.SimpleTy) { 2762 default: llvm_unreachable("Unexpected value type for atomic!"); 2763 case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_XOR_1; break; 2764 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_XOR_2; break; 2765 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_XOR_4; break; 2766 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_XOR_8; break; 2767 } 2768 break; 2769 case ISD::ATOMIC_LOAD_NAND: 2770 switch (VT.SimpleTy) { 2771 default: llvm_unreachable("Unexpected value type for atomic!"); 2772 
case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_NAND_1; break; 2773 case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_NAND_2; break; 2774 case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_NAND_4; break; 2775 case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_NAND_8; break; 2776 } 2777 break; 2778 } 2779 2780 return ExpandChainLibCall(LC, Node, false); 2781} 2782 2783void SelectionDAGLegalize::ExpandNode(SDNode *Node, 2784 SmallVectorImpl<SDValue> &Results) { 2785 DebugLoc dl = Node->getDebugLoc(); 2786 SDValue Tmp1, Tmp2, Tmp3, Tmp4; 2787 switch (Node->getOpcode()) { 2788 case ISD::CTPOP: 2789 case ISD::CTLZ: 2790 case ISD::CTTZ: 2791 Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl); 2792 Results.push_back(Tmp1); 2793 break; 2794 case ISD::BSWAP: 2795 Results.push_back(ExpandBSWAP(Node->getOperand(0), dl)); 2796 break; 2797 case ISD::FRAMEADDR: 2798 case ISD::RETURNADDR: 2799 case ISD::FRAME_TO_ARGS_OFFSET: 2800 Results.push_back(DAG.getConstant(0, Node->getValueType(0))); 2801 break; 2802 case ISD::FLT_ROUNDS_: 2803 Results.push_back(DAG.getConstant(1, Node->getValueType(0))); 2804 break; 2805 case ISD::EH_RETURN: 2806 case ISD::EH_LABEL: 2807 case ISD::PREFETCH: 2808 case ISD::VAEND: 2809 case ISD::EH_SJLJ_LONGJMP: 2810 case ISD::EH_SJLJ_DISPATCHSETUP: 2811 // If the target didn't expand these, there's nothing to do, so just 2812 // preserve the chain and be done. 2813 Results.push_back(Node->getOperand(0)); 2814 break; 2815 case ISD::EH_SJLJ_SETJMP: 2816 // If the target didn't expand this, just return 'zero' and preserve the 2817 // chain. 2818 Results.push_back(DAG.getConstant(0, MVT::i32)); 2819 Results.push_back(Node->getOperand(0)); 2820 break; 2821 case ISD::MEMBARRIER: { 2822 // If the target didn't lower this, lower it to '__sync_synchronize()' call 2823 TargetLowering::ArgListTy Args; 2824 std::pair<SDValue, SDValue> CallResult = 2825 TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()), 2826 false, false, false, false, 0, CallingConv::C, 2827 /*isTailCall=*/false, 2828 /*isReturnValueUsed=*/true, 2829 DAG.getExternalSymbol("__sync_synchronize", 2830 TLI.getPointerTy()), 2831 Args, DAG, dl); 2832 Results.push_back(CallResult.second); 2833 break; 2834 } 2835 // By default, atomic intrinsics are marked Legal and lowered. Targets 2836 // which don't support them directly, however, may want libcalls, in which 2837 // case they mark them Expand, and we get here. 
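// For example, an i32 ATOMIC_LOAD_ADD marked Expand ends up, via
// ExpandAtomic and ExpandChainLibCall, as a call to the
// __sync_fetch_and_add_4 libcall (assuming the default RTLIB call names),
// with the atomic node's chain used as the call's in-chain.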
2838 case ISD::ATOMIC_SWAP: 2839 case ISD::ATOMIC_LOAD_ADD: 2840 case ISD::ATOMIC_LOAD_SUB: 2841 case ISD::ATOMIC_LOAD_AND: 2842 case ISD::ATOMIC_LOAD_OR: 2843 case ISD::ATOMIC_LOAD_XOR: 2844 case ISD::ATOMIC_LOAD_NAND: 2845 case ISD::ATOMIC_LOAD_MIN: 2846 case ISD::ATOMIC_LOAD_MAX: 2847 case ISD::ATOMIC_LOAD_UMIN: 2848 case ISD::ATOMIC_LOAD_UMAX: 2849 case ISD::ATOMIC_CMP_SWAP: { 2850 std::pair<SDValue, SDValue> Tmp = ExpandAtomic(Node); 2851 Results.push_back(Tmp.first); 2852 Results.push_back(Tmp.second); 2853 break; 2854 } 2855 case ISD::DYNAMIC_STACKALLOC: 2856 ExpandDYNAMIC_STACKALLOC(Node, Results); 2857 break; 2858 case ISD::MERGE_VALUES: 2859 for (unsigned i = 0; i < Node->getNumValues(); i++) 2860 Results.push_back(Node->getOperand(i)); 2861 break; 2862 case ISD::UNDEF: { 2863 EVT VT = Node->getValueType(0); 2864 if (VT.isInteger()) 2865 Results.push_back(DAG.getConstant(0, VT)); 2866 else { 2867 assert(VT.isFloatingPoint() && "Unknown value type!"); 2868 Results.push_back(DAG.getConstantFP(0, VT)); 2869 } 2870 break; 2871 } 2872 case ISD::TRAP: { 2873 // If this operation is not supported, lower it to 'abort()' call 2874 TargetLowering::ArgListTy Args; 2875 std::pair<SDValue, SDValue> CallResult = 2876 TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()), 2877 false, false, false, false, 0, CallingConv::C, 2878 /*isTailCall=*/false, 2879 /*isReturnValueUsed=*/true, 2880 DAG.getExternalSymbol("abort", TLI.getPointerTy()), 2881 Args, DAG, dl); 2882 Results.push_back(CallResult.second); 2883 break; 2884 } 2885 case ISD::FP_ROUND: 2886 case ISD::BITCAST: 2887 Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0), 2888 Node->getValueType(0), dl); 2889 Results.push_back(Tmp1); 2890 break; 2891 case ISD::FP_EXTEND: 2892 Tmp1 = EmitStackConvert(Node->getOperand(0), 2893 Node->getOperand(0).getValueType(), 2894 Node->getValueType(0), dl); 2895 Results.push_back(Tmp1); 2896 break; 2897 case ISD::SIGN_EXTEND_INREG: { 2898 // NOTE: we could fall back on load/store here too for targets without 2899 // SAR. However, it is doubtful that any exist. 2900 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 2901 EVT VT = Node->getValueType(0); 2902 EVT ShiftAmountTy = TLI.getShiftAmountTy(VT); 2903 if (VT.isVector()) 2904 ShiftAmountTy = VT; 2905 unsigned BitsDiff = VT.getScalarType().getSizeInBits() - 2906 ExtraVT.getScalarType().getSizeInBits(); 2907 SDValue ShiftCst = DAG.getConstant(BitsDiff, ShiftAmountTy); 2908 Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0), 2909 Node->getOperand(0), ShiftCst); 2910 Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst); 2911 Results.push_back(Tmp1); 2912 break; 2913 } 2914 case ISD::FP_ROUND_INREG: { 2915 // The only way we can lower this is to turn it into a TRUNCSTORE, 2916 // EXTLOAD pair, targeting a temporary location (a stack slot). 2917 2918 // NOTE: there is a choice here between constantly creating new stack 2919 // slots and always reusing the same one. We currently always create 2920 // new ones, as reuse may inhibit scheduling. 
    EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
    Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT,
                            Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    Tmp1 = ExpandLegalINT_TO_FP(Node->getOpcode() == ISD::SINT_TO_FP,
                                Node->getOperand(0), Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  case ISD::FP_TO_UINT: {
    SDValue True, False;
    EVT VT = Node->getOperand(0).getValueType();
    EVT NVT = Node->getValueType(0);
    APFloat apf(APInt::getNullValue(VT.getSizeInBits()));
    APInt x = APInt::getSignBit(NVT.getSizeInBits());
    (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven);
    Tmp1 = DAG.getConstantFP(apf, VT);
    Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(VT),
                        Node->getOperand(0),
                        Tmp1, ISD::SETLT);
    True = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, Node->getOperand(0));
    False = DAG.getNode(ISD::FP_TO_SINT, dl, NVT,
                        DAG.getNode(ISD::FSUB, dl, VT,
                                    Node->getOperand(0), Tmp1));
    False = DAG.getNode(ISD::XOR, dl, NVT, False,
                        DAG.getConstant(x, NVT));
    Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, True, False);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::VAARG: {
    const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
    EVT VT = Node->getValueType(0);
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    unsigned Align = Node->getConstantOperandVal(3);

    SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2,
                                     MachinePointerInfo(V), false, false, 0);
    SDValue VAList = VAListLoad;

    if (Align > TLI.getMinStackArgumentAlignment()) {
      assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");

      VAList = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
                           DAG.getConstant(Align - 1,
                                           TLI.getPointerTy()));

      VAList = DAG.getNode(ISD::AND, dl, TLI.getPointerTy(), VAList,
                           DAG.getConstant(-(int64_t)Align,
                                           TLI.getPointerTy()));
    }

    // Increment the pointer, VAList, to the next vaarg
    Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
                       DAG.getConstant(TLI.getTargetData()->
                                       getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
                                       TLI.getPointerTy()));
    // Store the incremented VAList to the legalized pointer
    Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2,
                        MachinePointerInfo(V), false, false, 0);
    // Load the actual argument out of the pointer VAList
    Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
                                  false, false, 0));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::VACOPY: {
    // This defaults to loading a pointer from the input and storing it to the
    // output, returning the chain.
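    //
    // Illustrative equivalent in C, for targets where va_list is just a
    // pointer (targets with an aggregate va_list custom-lower VACOPY):
    //   void my_va_copy(void **dst, void **src) { *dst = *src; }
    // i.e. one pointer-sized load from the source slot followed by a store
    // to the destination slot, chained through the incoming chain operand.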
    const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
    const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
    Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0),
                       Node->getOperand(2), MachinePointerInfo(VS),
                       false, false, 0);
    Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
                        MachinePointerInfo(VD), false, false, 0);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    if (Node->getOperand(0).getValueType().getVectorNumElements() == 1)
      // This must be an access of the only element. Return it.
      Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0),
                         Node->getOperand(0));
    else
      Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0));
    Results.push_back(Tmp1);
    break;
  case ISD::EXTRACT_SUBVECTOR:
    Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0)));
    break;
  case ISD::INSERT_SUBVECTOR:
    Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0)));
    break;
  case ISD::CONCAT_VECTORS: {
    Results.push_back(ExpandVectorBuildThroughStack(Node));
    break;
  }
  case ISD::SCALAR_TO_VECTOR:
    Results.push_back(ExpandSCALAR_TO_VECTOR(Node));
    break;
  case ISD::INSERT_VECTOR_ELT:
    Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0),
                                              Node->getOperand(1),
                                              Node->getOperand(2), dl));
    break;
  case ISD::VECTOR_SHUFFLE: {
    SmallVector<int, 8> Mask;
    cast<ShuffleVectorSDNode>(Node)->getMask(Mask);

    EVT VT = Node->getValueType(0);
    EVT EltVT = VT.getVectorElementType();
    if (getTypeAction(EltVT) == Promote)
      EltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT);
    unsigned NumElems = VT.getVectorNumElements();
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i != NumElems; ++i) {
      if (Mask[i] < 0) {
        Ops.push_back(DAG.getUNDEF(EltVT));
        continue;
      }
      unsigned Idx = Mask[i];
      if (Idx < NumElems)
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  Node->getOperand(0),
                                  DAG.getIntPtrConstant(Idx)));
      else
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  Node->getOperand(1),
                                  DAG.getIntPtrConstant(Idx - NumElems)));
    }
    Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
    Results.push_back(Tmp1);
    break;
  }
  case ISD::EXTRACT_ELEMENT: {
    EVT OpTy = Node->getOperand(0).getValueType();
    if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) {
      // 1 -> Hi
      Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0),
                         DAG.getConstant(OpTy.getSizeInBits()/2,
                                         TLI.getShiftAmountTy(Node->getOperand(0).getValueType())));
      Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1);
    } else {
      // 0 -> Lo
      Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0),
                         Node->getOperand(0));
    }
    Results.push_back(Tmp1);
    break;
  }
  case ISD::STACKSAVE:
    // Expand to CopyFromReg if the target set
    // StackPointerRegisterToSaveRestore.
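    //
    // Sketch of the two outcomes below (comment only): with a saved stack
    // pointer register configured (e.g. X86::ESP on 32-bit x86), stacksave
    // becomes a CopyFromReg of that register plus its output chain; without
    // one, the value is UNDEF and the chain simply passes through, since
    // there is no generic way to read the stack pointer here.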
    if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
      Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP,
                                           Node->getValueType(0)));
      Results.push_back(Results[0].getValue(1));
    } else {
      Results.push_back(DAG.getUNDEF(Node->getValueType(0)));
      Results.push_back(Node->getOperand(0));
    }
    break;
  case ISD::STACKRESTORE:
    // Expand to CopyToReg if the target set
    // StackPointerRegisterToSaveRestore.
    if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
      Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP,
                                         Node->getOperand(1)));
    } else {
      Results.push_back(Node->getOperand(0));
    }
    break;
  case ISD::FCOPYSIGN:
    Results.push_back(ExpandFCOPYSIGN(Node));
    break;
  case ISD::FNEG:
    // Expand Y = FNEG(X) -> Y = SUB -0.0, X
    Tmp1 = DAG.getConstantFP(-0.0, Node->getValueType(0));
    Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1,
                       Node->getOperand(0));
    Results.push_back(Tmp1);
    break;
  case ISD::FABS: {
    // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X).
    EVT VT = Node->getValueType(0);
    Tmp1 = Node->getOperand(0);
    Tmp2 = DAG.getConstantFP(0.0, VT);
    Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(Tmp1.getValueType()),
                        Tmp1, Tmp2, ISD::SETUGT);
    Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1);
    Tmp1 = DAG.getNode(ISD::SELECT, dl, VT, Tmp2, Tmp1, Tmp3);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::FSQRT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64,
                                      RTLIB::SQRT_F80, RTLIB::SQRT_PPCF128));
    break;
  case ISD::FSIN:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64,
                                      RTLIB::SIN_F80, RTLIB::SIN_PPCF128));
    break;
  case ISD::FCOS:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64,
                                      RTLIB::COS_F80, RTLIB::COS_PPCF128));
    break;
  case ISD::FLOG:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64,
                                      RTLIB::LOG_F80, RTLIB::LOG_PPCF128));
    break;
  case ISD::FLOG2:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64,
                                      RTLIB::LOG2_F80, RTLIB::LOG2_PPCF128));
    break;
  case ISD::FLOG10:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64,
                                      RTLIB::LOG10_F80, RTLIB::LOG10_PPCF128));
    break;
  case ISD::FEXP:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64,
                                      RTLIB::EXP_F80, RTLIB::EXP_PPCF128));
    break;
  case ISD::FEXP2:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64,
                                      RTLIB::EXP2_F80, RTLIB::EXP2_PPCF128));
    break;
  case ISD::FTRUNC:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64,
                                      RTLIB::TRUNC_F80, RTLIB::TRUNC_PPCF128));
    break;
  case ISD::FFLOOR:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64,
                                      RTLIB::FLOOR_F80, RTLIB::FLOOR_PPCF128));
    break;
  case ISD::FCEIL:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64,
                                      RTLIB::CEIL_F80, RTLIB::CEIL_PPCF128));
    break;
  case ISD::FRINT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64,
                                      RTLIB::RINT_F80, RTLIB::RINT_PPCF128));
    break;
  case ISD::FNEARBYINT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32,
                                      RTLIB::NEARBYINT_F64,
                                      RTLIB::NEARBYINT_F80,
                                      RTLIB::NEARBYINT_PPCF128));
    break;
  case ISD::FPOWI:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64,
                                      RTLIB::POWI_F80, RTLIB::POWI_PPCF128));
    break;
  case ISD::FPOW:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64,
                                      RTLIB::POW_F80, RTLIB::POW_PPCF128));
    break;
  case ISD::FDIV:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64,
                                      RTLIB::DIV_F80, RTLIB::DIV_PPCF128));
    break;
  case ISD::FREM:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64,
                                      RTLIB::REM_F80, RTLIB::REM_PPCF128));
    break;
  case ISD::FP16_TO_FP32:
    Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false));
    break;
  case ISD::FP32_TO_FP16:
    Results.push_back(ExpandLibCall(RTLIB::FPROUND_F32_F16, Node, false));
    break;
  case ISD::ConstantFP: {
    ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
    // Check to see if this FP immediate is already legal.
    // If this is a legal constant, turn it into a TargetConstantFP node.
    if (TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0)))
      Results.push_back(SDValue(Node, 0));
    else
      Results.push_back(ExpandConstantFP(CFP, true, DAG, TLI));
    break;
  }
  case ISD::EHSELECTION: {
    unsigned Reg = TLI.getExceptionSelectorRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(1), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::EXCEPTIONADDR: {
    unsigned Reg = TLI.getExceptionAddressRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::SUB: {
    EVT VT = Node->getValueType(0);
    assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
           TLI.isOperationLegalOrCustom(ISD::XOR, VT) &&
           "Don't know how to expand this subtraction!");
    // X - Y -> X + (~Y + 1), i.e. add the two's complement negation of Y.
    Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1),
                       DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT));
    Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, VT));
    Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1));
    break;
  }
  case ISD::UREM:
  case ISD::SREM: {
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    bool isSigned = Node->getOpcode() == ISD::SREM;
    unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
    unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
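    // Strategy order below, sketched as a comment: prefer a combined
    // [SU]DIVREM node (or its libcall) and take the remainder result;
    // otherwise rebuild the remainder from a plain divide via the identity
    //   X % Y == X - (X / Y) * Y,  e.g. 7 % 3 == 7 - (7/3)*3 == 7 - 6 == 1;
    // and only fall back to the SREM_*/UREM_* integer libcalls last.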
    Tmp2 = Node->getOperand(0);
    Tmp3 = Node->getOperand(1);
    if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
        (isDivRemLibcallAvailable(Node, isSigned, TLI) &&
         UseDivRem(Node, isSigned, false))) {
      Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1);
    } else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) {
      // X % Y -> X-X/Y*Y
      Tmp1 = DAG.getNode(DivOpc, dl, VT, Tmp2, Tmp3);
      Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Tmp3);
      Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp1);
    } else if (isSigned)
      Tmp1 = ExpandIntLibCall(Node, true,
                              RTLIB::SREM_I8,
                              RTLIB::SREM_I16, RTLIB::SREM_I32,
                              RTLIB::SREM_I64, RTLIB::SREM_I128);
    else
      Tmp1 = ExpandIntLibCall(Node, false,
                              RTLIB::UREM_I8,
                              RTLIB::UREM_I16, RTLIB::UREM_I32,
                              RTLIB::UREM_I64, RTLIB::UREM_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::UDIV:
  case ISD::SDIV: {
    bool isSigned = Node->getOpcode() == ISD::SDIV;
    unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
        (isDivRemLibcallAvailable(Node, isSigned, TLI) &&
         UseDivRem(Node, isSigned, true)))
      Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0),
                         Node->getOperand(1));
    else if (isSigned)
      Tmp1 = ExpandIntLibCall(Node, true,
                              RTLIB::SDIV_I8,
                              RTLIB::SDIV_I16, RTLIB::SDIV_I32,
                              RTLIB::SDIV_I64, RTLIB::SDIV_I128);
    else
      Tmp1 = ExpandIntLibCall(Node, false,
                              RTLIB::UDIV_I8,
                              RTLIB::UDIV_I16, RTLIB::UDIV_I32,
                              RTLIB::UDIV_I64, RTLIB::UDIV_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::MULHU:
  case ISD::MULHS: {
    unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI :
                                                              ISD::SMUL_LOHI;
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) &&
           "If this wasn't legal, it shouldn't have been created!");
    Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0),
                       Node->getOperand(1));
    Results.push_back(Tmp1.getValue(1));
    break;
  }
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    // Expand into divrem libcall
    ExpandDivRemLibCall(Node, Results);
    break;
  case ISD::MUL: {
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    // See if multiply or divide can be lowered using two-result operations.
    // We just need the low half of the multiply; try both the signed
    // and unsigned forms. If the target supports both SMUL_LOHI and
    // UMUL_LOHI, form a preference by checking which forms of plain
    // MULH it supports.
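    //
    // For instance (illustrative): on a target that only supports UMUL_LOHI,
    // the plain MUL below is emitted as a UMUL_LOHI whose low result is used
    // and whose high half is simply left dead; only when neither *MUL_LOHI
    // form is available does this fall through to the MUL_I* libcalls.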
    bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT);
    bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT);
    bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT);
    bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT);
    unsigned OpToUse = 0;
    if (HasSMUL_LOHI && !HasMULHS) {
      OpToUse = ISD::SMUL_LOHI;
    } else if (HasUMUL_LOHI && !HasMULHU) {
      OpToUse = ISD::UMUL_LOHI;
    } else if (HasSMUL_LOHI) {
      OpToUse = ISD::SMUL_LOHI;
    } else if (HasUMUL_LOHI) {
      OpToUse = ISD::UMUL_LOHI;
    }
    if (OpToUse) {
      Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0),
                                    Node->getOperand(1)));
      break;
    }
    Tmp1 = ExpandIntLibCall(Node, false,
                            RTLIB::MUL_I8,
                            RTLIB::MUL_I16, RTLIB::MUL_I32,
                            RTLIB::MUL_I64, RTLIB::MUL_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SADDO:
  case ISD::SSUBO: {
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
                              ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
                              LHS, RHS);
    Results.push_back(Sum);
    EVT OType = Node->getValueType(1);

    SDValue Zero = DAG.getConstant(0, LHS.getValueType());

    //   LHSSign -> LHS >= 0
    //   RHSSign -> RHS >= 0
    //   SumSign -> Sum >= 0
    //
    //   Add:
    //   Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
    //   Sub:
    //   Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
    //
    SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE);
    SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE);
    SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign,
                                      Node->getOpcode() == ISD::SADDO ?
                                      ISD::SETEQ : ISD::SETNE);

    SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE);
    SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE);

    SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE);
    Results.push_back(Cmp);
    break;
  }
  case ISD::UADDO:
  case ISD::USUBO: {
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::UADDO ?
                              ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
                              LHS, RHS);
    Results.push_back(Sum);
    Results.push_back(DAG.getSetCC(dl, Node->getValueType(1), Sum, LHS,
                                   Node->getOpcode() == ISD::UADDO ?
                                   ISD::SETULT : ISD::SETUGT));
    break;
  }
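  // Worked example for the overflow checks above (comment only):
  //   unsigned i8: 0xFF + 0x01 = 0x00, and 0x00 <u 0xFF, so UADDO's second
  //                result (the SETULT compare against LHS) is true.
  //   signed i8:   0x7F + 0x01 = 0x80; both inputs are >= 0 but the sum is
  //                negative, so "signs match && sign(sum) != sign(LHS)"
  //                flags SADDO overflow.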
  case ISD::UMULO:
  case ISD::SMULO: {
    EVT VT = Node->getValueType(0);
    EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue BottomHalf;
    SDValue TopHalf;
    static const unsigned Ops[2][3] =
        { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
          { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
    bool isSigned = Node->getOpcode() == ISD::SMULO;
    if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
      BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
      TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
    } else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
      BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
                               RHS);
      TopHalf = BottomHalf.getValue(1);
    } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(),
                                                 VT.getSizeInBits() * 2))) {
      LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
      RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
      Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
      BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
                               DAG.getIntPtrConstant(0));
      TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
                            DAG.getIntPtrConstant(1));
    } else {
      // We can fall back to a libcall with an illegal type for the MUL if we
      // have a libcall big enough.
      // Also, we can fall back to a division in some cases, but that's a big
      // performance hit in the general case.
      RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
      if (WideVT == MVT::i16)
        LC = RTLIB::MUL_I16;
      else if (WideVT == MVT::i32)
        LC = RTLIB::MUL_I32;
      else if (WideVT == MVT::i64)
        LC = RTLIB::MUL_I64;
      else if (WideVT == MVT::i128)
        LC = RTLIB::MUL_I128;
      assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");

      // The high part is obtained by SRA'ing all but one of the bits of low
      // part.
      unsigned LoSize = VT.getSizeInBits();
      SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, RHS,
                                  DAG.getConstant(LoSize-1, TLI.getPointerTy()));
      SDValue HiRHS = DAG.getNode(ISD::SRA, dl, VT, LHS,
                                  DAG.getConstant(LoSize-1, TLI.getPointerTy()));

      // Here we're passing the 2 arguments explicitly as 4 arguments that are
      // pre-lowered to the correct types. This all depends upon WideVT not
      // being a legal type for the architecture and thus has to be split to
      // two arguments.
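      //
      // Concrete illustration (not target-specific): for an i32 [SU]MULO on
      // a target with no legal i64, LC is RTLIB::MUL_I64 and the four i32
      // pieces {LHS, HiLHS, RHS, HiRHS} stand in for the two i64 operands,
      // matching how an i64 argument would itself have been split here.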
      SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
      SDValue Ret = ExpandLibCall(LC, WideVT, Args, 4, isSigned, dl);
      BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
                               DAG.getIntPtrConstant(0));
      TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
                            DAG.getIntPtrConstant(1));
    }

    if (isSigned) {
      Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1,
                             TLI.getShiftAmountTy(BottomHalf.getValueType()));
      Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1);
      TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1,
                             ISD::SETNE);
    } else {
      TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf,
                             DAG.getConstant(0, VT), ISD::SETNE);
    }
    Results.push_back(BottomHalf);
    Results.push_back(TopHalf);
    break;
  }
  case ISD::BUILD_PAIR: {
    EVT PairTy = Node->getValueType(0);
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1));
    Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2,
                       DAG.getConstant(PairTy.getSizeInBits()/2,
                                       TLI.getShiftAmountTy(PairTy)));
    Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2));
    break;
  }
  case ISD::SELECT:
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    Tmp3 = Node->getOperand(2);
    if (Tmp1.getOpcode() == ISD::SETCC) {
      Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1),
                             Tmp2, Tmp3,
                             cast<CondCodeSDNode>(Tmp1.getOperand(2))->get());
    } else {
      Tmp1 = DAG.getSelectCC(dl, Tmp1,
                             DAG.getConstant(0, Tmp1.getValueType()),
                             Tmp2, Tmp3, ISD::SETNE);
    }
    Results.push_back(Tmp1);
    break;
  case ISD::BR_JT: {
    SDValue Chain = Node->getOperand(0);
    SDValue Table = Node->getOperand(1);
    SDValue Index = Node->getOperand(2);

    EVT PTy = TLI.getPointerTy();

    const TargetData &TD = *TLI.getTargetData();
    unsigned EntrySize =
      DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);

    Index = DAG.getNode(ISD::MUL, dl, PTy,
                        Index, DAG.getConstant(EntrySize, PTy));
    SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);

    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
    SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr,
                                MachinePointerInfo::getJumpTable(), MemVT,
                                false, false, 0);
    Addr = LD;
    if (TM.getRelocationModel() == Reloc::PIC_) {
      // For PIC, the sequence is:
      // BRIND(load(Jumptable + index) + RelocBase)
      // RelocBase can be JumpTable, GOT or some sort of global base.
      Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr,
                         TLI.getPICJumpTableRelocBase(Table, DAG));
    }
    Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::BRCOND:
    // Expand brcond's setcc into its constituent parts and create a BR_CC
    // Node.
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    if (Tmp2.getOpcode() == ISD::SETCC) {
      Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other,
                         Tmp1, Tmp2.getOperand(2),
                         Tmp2.getOperand(0), Tmp2.getOperand(1),
                         Node->getOperand(2));
    } else {
      // We test only the i1 bit. Skip the AND if UNDEF.
      Tmp3 = (Tmp2.getOpcode() == ISD::UNDEF) ?
               Tmp2 :
               DAG.getNode(ISD::AND, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getConstant(1, Tmp2.getValueType()));
      Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1,
                         DAG.getCondCode(ISD::SETNE), Tmp3,
                         DAG.getConstant(0, Tmp3.getValueType()),
                         Node->getOperand(2));
    }
    Results.push_back(Tmp1);
    break;
  case ISD::SETCC: {
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    Tmp3 = Node->getOperand(2);
    LegalizeSetCCCondCode(Node->getValueType(0), Tmp1, Tmp2, Tmp3, dl);

    // If we expanded the SETCC into an AND/OR, return the new node
    if (Tmp2.getNode() == 0) {
      Results.push_back(Tmp1);
      break;
    }

    // Otherwise, SETCC for the given comparison type must be completely
    // illegal; expand it into a SELECT_CC.
    EVT VT = Node->getValueType(0);
    Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2,
                       DAG.getConstant(1, VT), DAG.getConstant(0, VT), Tmp3);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SELECT_CC: {
    Tmp1 = Node->getOperand(0);   // LHS
    Tmp2 = Node->getOperand(1);   // RHS
    Tmp3 = Node->getOperand(2);   // True
    Tmp4 = Node->getOperand(3);   // False
    SDValue CC = Node->getOperand(4);

    LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp1.getValueType()),
                          Tmp1, Tmp2, CC, dl);

    assert(!Tmp2.getNode() && "Can't legalize SELECT_CC with legal condition!");
    Tmp2 = DAG.getConstant(0, Tmp1.getValueType());
    CC = DAG.getCondCode(ISD::SETNE);
    Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, Tmp2,
                       Tmp3, Tmp4, CC);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::BR_CC: {
    Tmp1 = Node->getOperand(0);   // Chain
    Tmp2 = Node->getOperand(2);   // LHS
    Tmp3 = Node->getOperand(3);   // RHS
    Tmp4 = Node->getOperand(1);   // CC

    LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp2.getValueType()),
                          Tmp2, Tmp3, Tmp4, dl);
    assert(LastCALLSEQ.size() == 1 && "branch inside CALLSEQ_BEGIN/END?");
    setLastCALLSEQ(DAG.getEntryNode());

    assert(!Tmp3.getNode() && "Can't legalize BR_CC with legal condition!");
    Tmp3 = DAG.getConstant(0, Tmp2.getValueType());
    Tmp4 = DAG.getCondCode(ISD::SETNE);
    Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4, Tmp2,
                       Tmp3, Node->getOperand(4));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::GLOBAL_OFFSET_TABLE:
  case ISD::GlobalAddress:
  case ISD::GlobalTLSAddress:
  case ISD::ExternalSymbol:
  case ISD::ConstantPool:
  case ISD::JumpTable:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID:
    // FIXME: Custom lowering for these operations shouldn't return null!
    for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
      Results.push_back(SDValue(Node, i));
    break;
  }
}

void SelectionDAGLegalize::PromoteNode(SDNode *Node,
                                       SmallVectorImpl<SDValue> &Results) {
  EVT OVT = Node->getValueType(0);
  if (Node->getOpcode() == ISD::UINT_TO_FP ||
      Node->getOpcode() == ISD::SINT_TO_FP ||
      Node->getOpcode() == ISD::SETCC) {
    OVT = Node->getOperand(0).getValueType();
  }
  EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
  DebugLoc dl = Node->getDebugLoc();
  SDValue Tmp1, Tmp2, Tmp3;
  switch (Node->getOpcode()) {
  case ISD::CTTZ:
  case ISD::CTLZ:
  case ISD::CTPOP:
    // Zero extend the argument.
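    //
    // Illustrative example of the fix-ups that follow (comment only): for an
    // i8 CTTZ promoted to i32, a zero input makes the widened CTTZ return 32,
    // so the select below rewrites that to 8 (the original bit width); for an
    // i8 CTLZ, the 24 leading zeros contributed by zero-extension are
    // subtracted back out.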
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
    // Perform the larger operation.
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
    if (Node->getOpcode() == ISD::CTTZ) {
      // if Tmp1 == sizeinbits(NVT) then Tmp1 = sizeinbits(Old VT)
      Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT),
                          Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT),
                          ISD::SETEQ);
      Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2,
                         DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1);
    } else if (Node->getOpcode() == ISD::CTLZ) {
      // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
      Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1,
                         DAG.getConstant(NVT.getSizeInBits() -
                                         OVT.getSizeInBits(), NVT));
    }
    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1));
    break;
  case ISD::BSWAP: {
    unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
    Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1);
    Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1,
                       DAG.getConstant(DiffBits, TLI.getShiftAmountTy(NVT)));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:
    Tmp1 = PromoteLegalFP_TO_INT(Node->getOperand(0), Node->getValueType(0),
                                 Node->getOpcode() == ISD::FP_TO_SINT, dl);
    Results.push_back(Tmp1);
    break;
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
    Tmp1 = PromoteLegalINT_TO_FP(Node->getOperand(0), Node->getValueType(0),
                                 Node->getOpcode() == ISD::SINT_TO_FP, dl);
    Results.push_back(Tmp1);
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    unsigned ExtOp, TruncOp;
    if (OVT.isVector()) {
      ExtOp   = ISD::BITCAST;
      TruncOp = ISD::BITCAST;
    } else {
      assert(OVT.isInteger() && "Cannot promote logic operation");
      ExtOp   = ISD::ANY_EXTEND;
      TruncOp = ISD::TRUNCATE;
    }
    // Promote each of the values to the new type.
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    // Perform the larger operation, then convert back
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
    Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1));
    break;
  }
  case ISD::SELECT: {
    unsigned ExtOp, TruncOp;
    if (Node->getValueType(0).isVector()) {
      ExtOp   = ISD::BITCAST;
      TruncOp = ISD::BITCAST;
    } else if (Node->getValueType(0).isInteger()) {
      ExtOp   = ISD::ANY_EXTEND;
      TruncOp = ISD::TRUNCATE;
    } else {
      ExtOp   = ISD::FP_EXTEND;
      TruncOp = ISD::FP_ROUND;
    }
    Tmp1 = Node->getOperand(0);
    // Promote each of the values to the new type.
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
    // Perform the larger operation, then round down.
    Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp1, Tmp2, Tmp3);
    if (TruncOp != ISD::FP_ROUND)
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1);
    else
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1,
                         DAG.getIntPtrConstant(0));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    SmallVector<int, 8> Mask;
    cast<ShuffleVectorSDNode>(Node)->getMask(Mask);

    // Cast the two input vectors.
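    //
    // Sketch of the mask rewrite that follows (illustrative types only): if,
    // say, OVT = v2i64 is shuffled with mask <1,0> and the promoted type
    // NVT = v4i32, each wide element corresponds to two narrow ones, so
    // ShuffleWithNarrowerEltType produces <2,3,0,1> and the result is
    // bitcast back to v2i64 afterwards.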
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1));

    // Convert the shuffle mask to the right # elements.
    Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SETCC: {
    unsigned ExtOp = ISD::FP_EXTEND;
    if (NVT.isInteger()) {
      ISD::CondCode CCCode =
        cast<CondCodeSDNode>(Node->getOperand(2))->get();
      ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    }
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
                                  Tmp1, Tmp2, Node->getOperand(2)));
    break;
  }
  }
}

// SelectionDAG::Legalize - This is the entry point for the file.
//
void SelectionDAG::Legalize(CodeGenOpt::Level OptLevel) {
  // Construct a legalizer for this DAG and run it.
  SelectionDAGLegalize(*this, OptLevel).LegalizeDAG();
}