LegalizeDAG.cpp revision 6726b6d75a8b679068a58cb954ba97cf9d1690ba
//===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::Legalize method.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/DwarfWriter.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <map>
using namespace llvm;

//===----------------------------------------------------------------------===//
/// SelectionDAGLegalize - This takes an arbitrary SelectionDAG as input and
/// hacks on it until the target machine can handle it.  This involves
/// eliminating value sizes the machine cannot handle (promoting small sizes to
/// large sizes or splitting up large values into small values) as well as
/// eliminating operations the machine cannot handle.
///
/// This code also does a small amount of optimization and recognition of
/// idioms as part of its processing.  For example, if a target does not
/// support a 'setcc' instruction efficiently, but does support the 'brcc'
/// instruction, this will attempt to merge setcc and brc instructions into
/// brcc's.
///
namespace {
class SelectionDAGLegalize {
  TargetLowering &TLI;
  SelectionDAG &DAG;
  CodeGenOpt::Level OptLevel;

  // Libcall insertion helpers.

  /// LastCALLSEQ_END - This keeps track of the CALLSEQ_END node that has been
  /// legalized.  We use this to ensure that calls are properly serialized
  /// against each other, including inserted libcalls.
  SDValue LastCALLSEQ_END;

  /// IsLegalizingCall - This member is used *only* for purposes of providing
  /// helpful assertions that a libcall isn't created while another call is
  /// being legalized (which could lead to non-serialized call sequences).
  bool IsLegalizingCall;

  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand      // Try to expand this to other ops, otherwise use a libcall.
  };

  /// ValueTypeActions - This is a bitvector that contains two bits for each
  /// value type, where the two bits correspond to the LegalizeAction enum.
  /// This can be queried with "getTypeAction(VT)".
  TargetLowering::ValueTypeActionImpl ValueTypeActions;

  /// LegalizedNodes - For nodes that are of legal width, and that have more
  /// than one use, this map indicates what regularized operand to use.  This
  /// allows us to avoid legalizing the same thing more than once.
  DenseMap<SDValue, SDValue> LegalizedNodes;

  void AddLegalizedOperand(SDValue From, SDValue To) {
    LegalizedNodes.insert(std::make_pair(From, To));
    // If someone requests legalization of the new node, return itself.
    if (From != To)
      LegalizedNodes.insert(std::make_pair(To, To));
  }

public:
  SelectionDAGLegalize(SelectionDAG &DAG, CodeGenOpt::Level ol);

  /// getTypeAction - Return how we should legalize values of this type:
  /// either it is already legal, we need to expand it into multiple registers
  /// of a smaller integer type, or we need to promote it to a larger type.
  LegalizeAction getTypeAction(EVT VT) const {
    return
      (LegalizeAction)ValueTypeActions.getTypeAction(*DAG.getContext(), VT);
  }

  /// isTypeLegal - Return true if this type is legal on this target.
  ///
  bool isTypeLegal(EVT VT) const {
    return getTypeAction(VT) == Legal;
  }

  void LegalizeDAG();

private:
  /// LegalizeOp - We know that the specified value has a legal type.
  /// Recursively ensure that the operands have legal types, then return the
  /// result.
  SDValue LegalizeOp(SDValue O);

  SDValue OptimizeFloatStore(StoreSDNode *ST);

  /// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
  /// insertion index for the INSERT_VECTOR_ELT instruction.  In this case, it
  /// is necessary to spill the vector being inserted into to memory, perform
  /// the insert there, and then read the result back.
  SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val,
                                         SDValue Idx, DebugLoc dl);
  SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
                                  SDValue Idx, DebugLoc dl);

  /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
  /// performs the same shuffle in terms of order of result bytes, but on a
  /// type whose vector element type is narrower than the original shuffle
  /// type.
  /// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
  SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
                                     SDValue N1, SDValue N2,
                                     SmallVectorImpl<int> &Mask) const;

  bool LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
                                    SmallPtrSet<SDNode*, 32> &NodesLeadingTo);

  void LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
                             DebugLoc dl);

  SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
  SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
                          RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
                          RTLIB::Libcall Call_PPCF128);
  SDValue ExpandIntLibCall(SDNode *Node, bool isSigned, RTLIB::Libcall Call_I16,
                           RTLIB::Libcall Call_I32, RTLIB::Libcall Call_I64,
                           RTLIB::Libcall Call_I128);

  SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, DebugLoc dl);
  SDValue ExpandBUILD_VECTOR(SDNode *Node);
  SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
  SDValue ExpandDBG_STOPPOINT(SDNode *Node);
  void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
                                SmallVectorImpl<SDValue> &Results);
  SDValue ExpandFCOPYSIGN(SDNode *Node);
  SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT,
                               DebugLoc dl);
  SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);
  SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);

  SDValue ExpandBSWAP(SDValue Op, DebugLoc dl);
  SDValue ExpandBitCount(unsigned Opc, SDValue Op, DebugLoc dl);

  SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
  SDValue ExpandVectorBuildThroughStack(SDNode* Node);

  void ExpandNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void PromoteNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
};
}

/// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
/// performs the same shuffle in terms of order of result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g.
<v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3> 185SDValue 186SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl, 187 SDValue N1, SDValue N2, 188 SmallVectorImpl<int> &Mask) const { 189 EVT EltVT = NVT.getVectorElementType(); 190 unsigned NumMaskElts = VT.getVectorNumElements(); 191 unsigned NumDestElts = NVT.getVectorNumElements(); 192 unsigned NumEltsGrowth = NumDestElts / NumMaskElts; 193 194 assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!"); 195 196 if (NumEltsGrowth == 1) 197 return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]); 198 199 SmallVector<int, 8> NewMask; 200 for (unsigned i = 0; i != NumMaskElts; ++i) { 201 int Idx = Mask[i]; 202 for (unsigned j = 0; j != NumEltsGrowth; ++j) { 203 if (Idx < 0) 204 NewMask.push_back(-1); 205 else 206 NewMask.push_back(Idx * NumEltsGrowth + j); 207 } 208 } 209 assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?"); 210 assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?"); 211 return DAG.getVectorShuffle(NVT, dl, N1, N2, &NewMask[0]); 212} 213 214SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag, 215 CodeGenOpt::Level ol) 216 : TLI(dag.getTargetLoweringInfo()), DAG(dag), OptLevel(ol), 217 ValueTypeActions(TLI.getValueTypeActions()) { 218 assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE && 219 "Too many value types for ValueTypeActions to hold!"); 220} 221 222void SelectionDAGLegalize::LegalizeDAG() { 223 LastCALLSEQ_END = DAG.getEntryNode(); 224 IsLegalizingCall = false; 225 226 // The legalize process is inherently a bottom-up recursive process (users 227 // legalize their uses before themselves). Given infinite stack space, we 228 // could just start legalizing on the root and traverse the whole graph. In 229 // practice however, this causes us to run out of stack space on large basic 230 // blocks. To avoid this problem, compute an ordering of the nodes where each 231 // node is only legalized after all of its operands are legalized. 232 DAG.AssignTopologicalOrder(); 233 for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(), 234 E = prior(DAG.allnodes_end()); I != next(E); ++I) 235 LegalizeOp(SDValue(I, 0)); 236 237 // Finally, it's possible the root changed. Get the new root. 238 SDValue OldRoot = DAG.getRoot(); 239 assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?"); 240 DAG.setRoot(LegalizedNodes[OldRoot]); 241 242 LegalizedNodes.clear(); 243 244 // Remove dead nodes now. 245 DAG.RemoveDeadNodes(); 246} 247 248 249/// FindCallEndFromCallStart - Given a chained node that is part of a call 250/// sequence, find the CALLSEQ_END node that terminates the call sequence. 251static SDNode *FindCallEndFromCallStart(SDNode *Node) { 252 if (Node->getOpcode() == ISD::CALLSEQ_END) 253 return Node; 254 if (Node->use_empty()) 255 return 0; // No CallSeqEnd 256 257 // The chain is usually at the end. 258 SDValue TheChain(Node, Node->getNumValues()-1); 259 if (TheChain.getValueType() != MVT::Other) { 260 // Sometimes it's at the beginning. 261 TheChain = SDValue(Node, 0); 262 if (TheChain.getValueType() != MVT::Other) { 263 // Otherwise, hunt for it. 264 for (unsigned i = 1, e = Node->getNumValues(); i != e; ++i) 265 if (Node->getValueType(i) == MVT::Other) { 266 TheChain = SDValue(Node, i); 267 break; 268 } 269 270 // Otherwise, we walked into a node without a chain. 
      if (TheChain.getValueType() != MVT::Other)
        return 0;
    }
  }

  for (SDNode::use_iterator UI = Node->use_begin(),
       E = Node->use_end(); UI != E; ++UI) {

    // Make sure to only follow users of our token chain.
    SDNode *User = *UI;
    for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i)
      if (User->getOperand(i) == TheChain)
        if (SDNode *Result = FindCallEndFromCallStart(User))
          return Result;
  }
  return 0;
}

/// FindCallStartFromCallEnd - Given a chained node that is part of a call
/// sequence, find the CALLSEQ_START node that initiates the call sequence.
static SDNode *FindCallStartFromCallEnd(SDNode *Node) {
  assert(Node && "Didn't find callseq_start for a call??");
  if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;

  assert(Node->getOperand(0).getValueType() == MVT::Other &&
         "Node doesn't have a token chain argument!");
  return FindCallStartFromCallEnd(Node->getOperand(0).getNode());
}

/// LegalizeAllNodesNotLeadingTo - Recursively walk the operands of N, looking
/// to see if any of them can reach Dest.  If none of the operands lead to
/// Dest, legalize them, legalize this node, and return false; otherwise,
/// return true.
///
/// Keep track of the nodes we find that actually do lead to Dest in
/// NodesLeadingTo.  This avoids retraversing them an exponential number of
/// times.
///
bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
                                     SmallPtrSet<SDNode*, 32> &NodesLeadingTo) {
  if (N == Dest) return true;  // N certainly leads to Dest :)

  // If we've already processed this node and it does lead to Dest, there is no
  // need to reprocess it.
  if (NodesLeadingTo.count(N)) return true;

  // If the first result of this node has already been legalized, then it
  // cannot reach Dest.
  if (LegalizedNodes.count(SDValue(N, 0))) return false;

  // Okay, this node has not already been legalized.  Check and legalize all
  // operands.  If none lead to Dest, then we can legalize this node.
  bool OperandsLeadToDest = false;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    OperandsLeadToDest |=   // If an operand leads to Dest, so do we.
      LegalizeAllNodesNotLeadingTo(N->getOperand(i).getNode(), Dest,
                                   NodesLeadingTo);

  if (OperandsLeadToDest) {
    NodesLeadingTo.insert(N);
    return true;
  }

  // Okay, this node looks safe, legalize it and return false.
  LegalizeOp(SDValue(N, 0));
  return false;
}

/// ExpandConstantFP - Expands the ConstantFP node to an integer constant or
/// a load from the constant pool.
static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
                                SelectionDAG &DAG, const TargetLowering &TLI) {
  bool Extend = false;
  DebugLoc dl = CFP->getDebugLoc();

  // If a FP immediate is precise when represented as a float and if the
  // target can do an extending load from float to double, we put it into
  // the constant pool as a float, even if it is statically typed as a
  // double.  This shrinks FP constants and canonicalizes them for targets
  // where an FP extending load is the same cost as a normal load (such as
  // on the x87 fp stack or PPC FP unit).
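  // For example, a double constant such as 1.0 is exactly representable as a
  // float, so it can be kept in the constant pool as a 4-byte f32 entry and
  // widened back to f64 with an extending load.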
349 EVT VT = CFP->getValueType(0); 350 ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue()); 351 if (!UseCP) { 352 assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion"); 353 return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(), 354 (VT == MVT::f64) ? MVT::i64 : MVT::i32); 355 } 356 357 EVT OrigVT = VT; 358 EVT SVT = VT; 359 while (SVT != MVT::f32) { 360 SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1); 361 if (CFP->isValueValidForType(SVT, CFP->getValueAPF()) && 362 // Only do this if the target has a native EXTLOAD instruction from 363 // smaller type. 364 TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) && 365 TLI.ShouldShrinkFPConstant(OrigVT)) { 366 const Type *SType = SVT.getTypeForEVT(*DAG.getContext()); 367 LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType)); 368 VT = SVT; 369 Extend = true; 370 } 371 } 372 373 SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy()); 374 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 375 if (Extend) 376 return DAG.getExtLoad(ISD::EXTLOAD, dl, 377 OrigVT, DAG.getEntryNode(), 378 CPIdx, PseudoSourceValue::getConstantPool(), 379 0, VT, false, Alignment); 380 return DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx, 381 PseudoSourceValue::getConstantPool(), 0, false, Alignment); 382} 383 384/// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores. 385static 386SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, 387 const TargetLowering &TLI) { 388 SDValue Chain = ST->getChain(); 389 SDValue Ptr = ST->getBasePtr(); 390 SDValue Val = ST->getValue(); 391 EVT VT = Val.getValueType(); 392 int Alignment = ST->getAlignment(); 393 int SVOffset = ST->getSrcValueOffset(); 394 DebugLoc dl = ST->getDebugLoc(); 395 if (ST->getMemoryVT().isFloatingPoint() || 396 ST->getMemoryVT().isVector()) { 397 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 398 if (TLI.isTypeLegal(intVT)) { 399 // Expand to a bitconvert of the value to the integer type of the 400 // same size, then a (misaligned) int store. 401 // FIXME: Does not handle truncating floating point stores! 402 SDValue Result = DAG.getNode(ISD::BIT_CONVERT, dl, intVT, Val); 403 return DAG.getStore(Chain, dl, Result, Ptr, ST->getSrcValue(), 404 SVOffset, ST->isVolatile(), Alignment); 405 } else { 406 // Do a (aligned) store to a stack slot, then copy from the stack slot 407 // to the final destination using (unaligned) integer loads and stores. 408 EVT StoredVT = ST->getMemoryVT(); 409 EVT RegVT = 410 TLI.getRegisterType(*DAG.getContext(), EVT::getIntegerVT(*DAG.getContext(), StoredVT.getSizeInBits())); 411 unsigned StoredBytes = StoredVT.getSizeInBits() / 8; 412 unsigned RegBytes = RegVT.getSizeInBits() / 8; 413 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes; 414 415 // Make sure the stack slot is also aligned for the register type. 416 SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT); 417 418 // Perform the original store, only redirected to the stack slot. 419 SDValue Store = DAG.getTruncStore(Chain, dl, 420 Val, StackPtr, NULL, 0, StoredVT); 421 SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy()); 422 SmallVector<SDValue, 8> Stores; 423 unsigned Offset = 0; 424 425 // Do all but one copies using the full register width. 426 for (unsigned i = 1; i < NumRegs; i++) { 427 // Load one integer register's worth from the stack slot. 428 SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr, NULL, 0); 429 // Store it to the final location. 
Remember the store. 430 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr, 431 ST->getSrcValue(), SVOffset + Offset, 432 ST->isVolatile(), 433 MinAlign(ST->getAlignment(), Offset))); 434 // Increment the pointers. 435 Offset += RegBytes; 436 StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr, 437 Increment); 438 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 439 } 440 441 // The last store may be partial. Do a truncating store. On big-endian 442 // machines this requires an extending load from the stack slot to ensure 443 // that the bits are in the right place. 444 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset)); 445 446 // Load from the stack slot. 447 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr, 448 NULL, 0, MemVT); 449 450 Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr, 451 ST->getSrcValue(), SVOffset + Offset, 452 MemVT, ST->isVolatile(), 453 MinAlign(ST->getAlignment(), Offset))); 454 // The order of the stores doesn't matter - say it with a TokenFactor. 455 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0], 456 Stores.size()); 457 } 458 } 459 assert(ST->getMemoryVT().isInteger() && 460 !ST->getMemoryVT().isVector() && 461 "Unaligned store of unknown type."); 462 // Get the half-size VT 463 EVT NewStoredVT = 464 (MVT::SimpleValueType)(ST->getMemoryVT().getSimpleVT().SimpleTy - 1); 465 int NumBits = NewStoredVT.getSizeInBits(); 466 int IncrementSize = NumBits / 8; 467 468 // Divide the stored value in two parts. 469 SDValue ShiftAmount = DAG.getConstant(NumBits, TLI.getShiftAmountTy()); 470 SDValue Lo = Val; 471 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount); 472 473 // Store the two parts 474 SDValue Store1, Store2; 475 Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr, 476 ST->getSrcValue(), SVOffset, NewStoredVT, 477 ST->isVolatile(), Alignment); 478 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, 479 DAG.getConstant(IncrementSize, TLI.getPointerTy())); 480 Alignment = MinAlign(Alignment, IncrementSize); 481 Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr, 482 ST->getSrcValue(), SVOffset + IncrementSize, 483 NewStoredVT, ST->isVolatile(), Alignment); 484 485 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2); 486} 487 488/// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads. 489static 490SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, 491 const TargetLowering &TLI) { 492 int SVOffset = LD->getSrcValueOffset(); 493 SDValue Chain = LD->getChain(); 494 SDValue Ptr = LD->getBasePtr(); 495 EVT VT = LD->getValueType(0); 496 EVT LoadedVT = LD->getMemoryVT(); 497 DebugLoc dl = LD->getDebugLoc(); 498 if (VT.isFloatingPoint() || VT.isVector()) { 499 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits()); 500 if (TLI.isTypeLegal(intVT)) { 501 // Expand to a (misaligned) integer load of the same size, 502 // then bitconvert to floating point or vector. 
503 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getSrcValue(), 504 SVOffset, LD->isVolatile(), 505 LD->getAlignment()); 506 SDValue Result = DAG.getNode(ISD::BIT_CONVERT, dl, LoadedVT, newLoad); 507 if (VT.isFloatingPoint() && LoadedVT != VT) 508 Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result); 509 510 SDValue Ops[] = { Result, Chain }; 511 return DAG.getMergeValues(Ops, 2, dl); 512 } else { 513 // Copy the value to a (aligned) stack slot using (unaligned) integer 514 // loads and stores, then do a (aligned) load from the stack slot. 515 EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT); 516 unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8; 517 unsigned RegBytes = RegVT.getSizeInBits() / 8; 518 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes; 519 520 // Make sure the stack slot is also aligned for the register type. 521 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT); 522 523 SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy()); 524 SmallVector<SDValue, 8> Stores; 525 SDValue StackPtr = StackBase; 526 unsigned Offset = 0; 527 528 // Do all but one copies using the full register width. 529 for (unsigned i = 1; i < NumRegs; i++) { 530 // Load one integer register's worth from the original location. 531 SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr, LD->getSrcValue(), 532 SVOffset + Offset, LD->isVolatile(), 533 MinAlign(LD->getAlignment(), Offset)); 534 // Follow the load with a store to the stack slot. Remember the store. 535 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr, 536 NULL, 0)); 537 // Increment the pointers. 538 Offset += RegBytes; 539 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); 540 StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr, 541 Increment); 542 } 543 544 // The last copy may be partial. Do an extending load. 545 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 8 * (LoadedBytes - Offset)); 546 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr, 547 LD->getSrcValue(), SVOffset + Offset, 548 MemVT, LD->isVolatile(), 549 MinAlign(LD->getAlignment(), Offset)); 550 // Follow the load with a store to the stack slot. Remember the store. 551 // On big-endian machines this requires a truncating store to ensure 552 // that the bits end up in the right place. 553 Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr, 554 NULL, 0, MemVT)); 555 556 // The order of the stores doesn't matter - say it with a TokenFactor. 557 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0], 558 Stores.size()); 559 560 // Finally, perform the original load only redirected to the stack slot. 561 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase, 562 NULL, 0, LoadedVT); 563 564 // Callers expect a MERGE_VALUES node. 565 SDValue Ops[] = { Load, TF }; 566 return DAG.getMergeValues(Ops, 2, dl); 567 } 568 } 569 assert(LoadedVT.isInteger() && !LoadedVT.isVector() && 570 "Unaligned load of unsupported type."); 571 572 // Compute the new VT that is half the size of the old one. This is an 573 // integer MVT. 574 unsigned NumBits = LoadedVT.getSizeInBits(); 575 EVT NewLoadedVT; 576 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2); 577 NumBits >>= 1; 578 579 unsigned Alignment = LD->getAlignment(); 580 unsigned IncrementSize = NumBits / 8; 581 ISD::LoadExtType HiExtType = LD->getExtensionType(); 582 583 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD. 
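  // (A NON_EXTLOAD cannot load the narrower half into the wider result type,
  // so the hi half needs some extension; ZEXTLOAD is the safe choice since it
  // leaves the bits above the loaded half zero.)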
  if (HiExtType == ISD::NON_EXTLOAD)
    HiExtType = ISD::ZEXTLOAD;

  // Load the value in two parts.
  SDValue Lo, Hi;
  if (TLI.isLittleEndian()) {
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getSrcValue(),
                        SVOffset, NewLoadedVT, LD->isVolatile(), Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, TLI.getPointerTy()));
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getSrcValue(),
                        SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(),
                        MinAlign(Alignment, IncrementSize));
  } else {
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getSrcValue(),
                        SVOffset, NewLoadedVT, LD->isVolatile(), Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, TLI.getPointerTy()));
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getSrcValue(),
                        SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(),
                        MinAlign(Alignment, IncrementSize));
  }

  // Aggregate the two parts.
  SDValue ShiftAmount = DAG.getConstant(NumBits, TLI.getShiftAmountTy());
  SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
  Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                           Hi.getValue(1));

  SDValue Ops[] = { Result, TF };
  return DAG.getMergeValues(Ops, 2, dl);
}

/// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
/// insertion index for the INSERT_VECTOR_ELT instruction.  In this case, it
/// is necessary to spill the vector being inserted into to memory, perform
/// the insert there, and then read the result back.
SDValue SelectionDAGLegalize::
PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
                               DebugLoc dl) {
  SDValue Tmp1 = Vec;
  SDValue Tmp2 = Val;
  SDValue Tmp3 = Idx;

  // If the target doesn't support this, we have to spill the input vector
  // to a temporary stack slot, update the element, then reload it.  This is
  // badness.  We could also load the value into a vector register (either
  // with a "move to register" or "extload into register" instruction), then
  // permute it into place, if the idx is a constant and is supported by the
  // target.
  EVT VT = Tmp1.getValueType();
  EVT EltVT = VT.getVectorElementType();
  EVT IdxVT = Tmp3.getValueType();
  EVT PtrVT = TLI.getPointerTy();
  SDValue StackPtr = DAG.CreateStackTemporary(VT);

  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();

  // Store the vector.
  SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr,
                            PseudoSourceValue::getFixedStack(SPFI), 0);

  // Truncate or zero extend offset to target pointer type.
  unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  Tmp3 = DAG.getNode(CastOpc, dl, PtrVT, Tmp3);
  // Add the offset to the index.
  unsigned EltSize = EltVT.getSizeInBits()/8;
  Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,
                     DAG.getConstant(EltSize, IdxVT));
  SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr);
  // Store the scalar value.
  Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2,
                         PseudoSourceValue::getFixedStack(SPFI), 0, EltVT);
  // Load the updated vector.
659 return DAG.getLoad(VT, dl, Ch, StackPtr, 660 PseudoSourceValue::getFixedStack(SPFI), 0); 661} 662 663 664SDValue SelectionDAGLegalize:: 665ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, DebugLoc dl) { 666 if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) { 667 // SCALAR_TO_VECTOR requires that the type of the value being inserted 668 // match the element type of the vector being created, except for 669 // integers in which case the inserted value can be over width. 670 EVT EltVT = Vec.getValueType().getVectorElementType(); 671 if (Val.getValueType() == EltVT || 672 (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) { 673 SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 674 Vec.getValueType(), Val); 675 676 unsigned NumElts = Vec.getValueType().getVectorNumElements(); 677 // We generate a shuffle of InVec and ScVec, so the shuffle mask 678 // should be 0,1,2,3,4,5... with the appropriate element replaced with 679 // elt 0 of the RHS. 680 SmallVector<int, 8> ShufOps; 681 for (unsigned i = 0; i != NumElts; ++i) 682 ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts); 683 684 return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec, 685 &ShufOps[0]); 686 } 687 } 688 return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl); 689} 690 691SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) { 692 // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr' 693 // FIXME: We shouldn't do this for TargetConstantFP's. 694 // FIXME: move this to the DAG Combiner! Note that we can't regress due 695 // to phase ordering between legalized code and the dag combiner. This 696 // probably means that we need to integrate dag combiner and legalizer 697 // together. 698 // We generally can't do this one for long doubles. 699 SDValue Tmp1 = ST->getChain(); 700 SDValue Tmp2 = ST->getBasePtr(); 701 SDValue Tmp3; 702 int SVOffset = ST->getSrcValueOffset(); 703 unsigned Alignment = ST->getAlignment(); 704 bool isVolatile = ST->isVolatile(); 705 DebugLoc dl = ST->getDebugLoc(); 706 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) { 707 if (CFP->getValueType(0) == MVT::f32 && 708 getTypeAction(MVT::i32) == Legal) { 709 Tmp3 = DAG.getConstant(CFP->getValueAPF(). 710 bitcastToAPInt().zextOrTrunc(32), 711 MVT::i32); 712 return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(), 713 SVOffset, isVolatile, Alignment); 714 } else if (CFP->getValueType(0) == MVT::f64) { 715 // If this target supports 64-bit registers, do a single 64-bit store. 716 if (getTypeAction(MVT::i64) == Legal) { 717 Tmp3 = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt(). 718 zextOrTrunc(64), MVT::i64); 719 return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(), 720 SVOffset, isVolatile, Alignment); 721 } else if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) { 722 // Otherwise, if the target supports 32-bit registers, use 2 32-bit 723 // stores. If the target supports neither 32- nor 64-bits, this 724 // xform is certainly not worth it. 
725 const APInt &IntVal =CFP->getValueAPF().bitcastToAPInt(); 726 SDValue Lo = DAG.getConstant(APInt(IntVal).trunc(32), MVT::i32); 727 SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32); 728 if (TLI.isBigEndian()) std::swap(Lo, Hi); 729 730 Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getSrcValue(), 731 SVOffset, isVolatile, Alignment); 732 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 733 DAG.getIntPtrConstant(4)); 734 Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(), SVOffset+4, 735 isVolatile, MinAlign(Alignment, 4U)); 736 737 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi); 738 } 739 } 740 } 741 return SDValue(); 742} 743 744/// LegalizeOp - We know that the specified value has a legal type, and 745/// that its operands are legal. Now ensure that the operation itself 746/// is legal, recursively ensuring that the operands' operations remain 747/// legal. 748SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { 749 if (Op.getOpcode() == ISD::TargetConstant) // Allow illegal target nodes. 750 return Op; 751 752 SDNode *Node = Op.getNode(); 753 DebugLoc dl = Node->getDebugLoc(); 754 755 for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) 756 assert(getTypeAction(Node->getValueType(i)) == Legal && 757 "Unexpected illegal type!"); 758 759 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) 760 assert((isTypeLegal(Node->getOperand(i).getValueType()) || 761 Node->getOperand(i).getOpcode() == ISD::TargetConstant) && 762 "Unexpected illegal type!"); 763 764 // Note that LegalizeOp may be reentered even from single-use nodes, which 765 // means that we always must cache transformed nodes. 766 DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op); 767 if (I != LegalizedNodes.end()) return I->second; 768 769 SDValue Tmp1, Tmp2, Tmp3, Tmp4; 770 SDValue Result = Op; 771 bool isCustom = false; 772 773 // Figure out the correct action; the way to query this varies by opcode 774 TargetLowering::LegalizeAction Action; 775 bool SimpleFinishLegalizing = true; 776 switch (Node->getOpcode()) { 777 case ISD::INTRINSIC_W_CHAIN: 778 case ISD::INTRINSIC_WO_CHAIN: 779 case ISD::INTRINSIC_VOID: 780 case ISD::VAARG: 781 case ISD::STACKSAVE: 782 Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other); 783 break; 784 case ISD::SINT_TO_FP: 785 case ISD::UINT_TO_FP: 786 case ISD::EXTRACT_VECTOR_ELT: 787 Action = TLI.getOperationAction(Node->getOpcode(), 788 Node->getOperand(0).getValueType()); 789 break; 790 case ISD::FP_ROUND_INREG: 791 case ISD::SIGN_EXTEND_INREG: { 792 EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT(); 793 Action = TLI.getOperationAction(Node->getOpcode(), InnerType); 794 break; 795 } 796 case ISD::SELECT_CC: 797 case ISD::SETCC: 798 case ISD::BR_CC: { 799 unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 : 800 Node->getOpcode() == ISD::SETCC ? 2 : 1; 801 unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 : 0; 802 EVT OpVT = Node->getOperand(CompareOperand).getValueType(); 803 ISD::CondCode CCCode = 804 cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get(); 805 Action = TLI.getCondCodeAction(CCCode, OpVT); 806 if (Action == TargetLowering::Legal) { 807 if (Node->getOpcode() == ISD::SELECT_CC) 808 Action = TLI.getOperationAction(Node->getOpcode(), 809 Node->getValueType(0)); 810 else 811 Action = TLI.getOperationAction(Node->getOpcode(), OpVT); 812 } 813 break; 814 } 815 case ISD::LOAD: 816 case ISD::STORE: 817 // FIXME: Model these properly. 
LOAD and STORE are complicated, and 818 // STORE expects the unlegalized operand in some cases. 819 SimpleFinishLegalizing = false; 820 break; 821 case ISD::CALLSEQ_START: 822 case ISD::CALLSEQ_END: 823 // FIXME: This shouldn't be necessary. These nodes have special properties 824 // dealing with the recursive nature of legalization. Removing this 825 // special case should be done as part of making LegalizeDAG non-recursive. 826 SimpleFinishLegalizing = false; 827 break; 828 case ISD::EXTRACT_ELEMENT: 829 case ISD::FLT_ROUNDS_: 830 case ISD::SADDO: 831 case ISD::SSUBO: 832 case ISD::UADDO: 833 case ISD::USUBO: 834 case ISD::SMULO: 835 case ISD::UMULO: 836 case ISD::FPOWI: 837 case ISD::MERGE_VALUES: 838 case ISD::EH_RETURN: 839 case ISD::FRAME_TO_ARGS_OFFSET: 840 // These operations lie about being legal: when they claim to be legal, 841 // they should actually be expanded. 842 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); 843 if (Action == TargetLowering::Legal) 844 Action = TargetLowering::Expand; 845 break; 846 case ISD::TRAMPOLINE: 847 case ISD::FRAMEADDR: 848 case ISD::RETURNADDR: 849 // These operations lie about being legal: when they claim to be legal, 850 // they should actually be custom-lowered. 851 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); 852 if (Action == TargetLowering::Legal) 853 Action = TargetLowering::Custom; 854 break; 855 case ISD::BUILD_VECTOR: 856 // A weird case: legalization for BUILD_VECTOR never legalizes the 857 // operands! 858 // FIXME: This really sucks... changing it isn't semantically incorrect, 859 // but it massively pessimizes the code for floating-point BUILD_VECTORs 860 // because ConstantFP operands get legalized into constant pool loads 861 // before the BUILD_VECTOR code can see them. It doesn't usually bite, 862 // though, because BUILD_VECTORS usually get lowered into other nodes 863 // which get legalized properly. 864 SimpleFinishLegalizing = false; 865 break; 866 default: 867 if (Node->getOpcode() >= ISD::BUILTIN_OP_END) { 868 Action = TargetLowering::Legal; 869 } else { 870 Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); 871 } 872 break; 873 } 874 875 if (SimpleFinishLegalizing) { 876 SmallVector<SDValue, 8> Ops, ResultVals; 877 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) 878 Ops.push_back(LegalizeOp(Node->getOperand(i))); 879 switch (Node->getOpcode()) { 880 default: break; 881 case ISD::BR: 882 case ISD::BRIND: 883 case ISD::BR_JT: 884 case ISD::BR_CC: 885 case ISD::BRCOND: 886 // Branches tweak the chain to include LastCALLSEQ_END 887 Ops[0] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ops[0], 888 LastCALLSEQ_END); 889 Ops[0] = LegalizeOp(Ops[0]); 890 LastCALLSEQ_END = DAG.getEntryNode(); 891 break; 892 case ISD::SHL: 893 case ISD::SRL: 894 case ISD::SRA: 895 case ISD::ROTL: 896 case ISD::ROTR: 897 // Legalizing shifts/rotates requires adjusting the shift amount 898 // to the appropriate width. 899 if (!Ops[1].getValueType().isVector()) 900 Ops[1] = LegalizeOp(DAG.getShiftAmountOperand(Ops[1])); 901 break; 902 case ISD::SRL_PARTS: 903 case ISD::SRA_PARTS: 904 case ISD::SHL_PARTS: 905 // Legalizing shifts/rotates requires adjusting the shift amount 906 // to the appropriate width. 
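      // (For the *_PARTS nodes the shift amount is operand 2 rather than
      // operand 1, hence the separate case.)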
      if (!Ops[2].getValueType().isVector())
        Ops[2] = LegalizeOp(DAG.getShiftAmountOperand(Ops[2]));
      break;
    }

    Result = DAG.UpdateNodeOperands(Result.getValue(0), Ops.data(),
                                    Ops.size());
    switch (Action) {
    case TargetLowering::Legal:
      for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
        ResultVals.push_back(Result.getValue(i));
      break;
    case TargetLowering::Custom:
      // FIXME: The handling for custom lowering with multiple results is
      // a complete mess.
      Tmp1 = TLI.LowerOperation(Result, DAG);
      if (Tmp1.getNode()) {
        for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) {
          if (e == 1)
            ResultVals.push_back(Tmp1);
          else
            ResultVals.push_back(Tmp1.getValue(i));
        }
        break;
      }

      // FALL THROUGH
    case TargetLowering::Expand:
      ExpandNode(Result.getNode(), ResultVals);
      break;
    case TargetLowering::Promote:
      PromoteNode(Result.getNode(), ResultVals);
      break;
    }
    if (!ResultVals.empty()) {
      for (unsigned i = 0, e = ResultVals.size(); i != e; ++i) {
        if (ResultVals[i] != SDValue(Node, i))
          ResultVals[i] = LegalizeOp(ResultVals[i]);
        AddLegalizedOperand(SDValue(Node, i), ResultVals[i]);
      }
      return ResultVals[Op.getResNo()];
    }
  }

  switch (Node->getOpcode()) {
  default:
#ifndef NDEBUG
    errs() << "NODE: ";
    Node->dump(&DAG);
    errs() << "\n";
#endif
    llvm_unreachable("Do not know how to legalize this operator!");

  case ISD::BUILD_VECTOR:
    switch (TLI.getOperationAction(ISD::BUILD_VECTOR, Node->getValueType(0))) {
    default: llvm_unreachable("This action is not supported yet!");
    case TargetLowering::Custom:
      Tmp3 = TLI.LowerOperation(Result, DAG);
      if (Tmp3.getNode()) {
        Result = Tmp3;
        break;
      }
      // FALLTHROUGH
    case TargetLowering::Expand:
      Result = ExpandBUILD_VECTOR(Result.getNode());
      break;
    }
    break;
  case ISD::CALLSEQ_START: {
    SDNode *CallEnd = FindCallEndFromCallStart(Node);

    // Recursively legalize all of the inputs of the call end that do not lead
    // to this call start.  This ensures that any libcalls that need to be
    // inserted are inserted *before* the CALLSEQ_START.
    {SmallPtrSet<SDNode*, 32> NodesLeadingTo;
    for (unsigned i = 0, e = CallEnd->getNumOperands(); i != e; ++i)
      LegalizeAllNodesNotLeadingTo(CallEnd->getOperand(i).getNode(), Node,
                                   NodesLeadingTo);
    }

    // Now that we have legalized all of the inputs (which may have inserted
    // libcalls), create the new CALLSEQ_START node.
    Tmp1 = LegalizeOp(Node->getOperand(0));  // Legalize the chain.

    // Merge in the last call to ensure that this call starts after the last
    // call ended.
    if (LastCALLSEQ_END.getOpcode() != ISD::EntryToken) {
      Tmp1 = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                         Tmp1, LastCALLSEQ_END);
      Tmp1 = LegalizeOp(Tmp1);
    }

    // Do not try to legalize the target-specific arguments (#1+).
    if (Tmp1 != Node->getOperand(0)) {
      SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
      Ops[0] = Tmp1;
      Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
    }

    // Remember that the CALLSEQ_START is legalized.
    AddLegalizedOperand(Op.getValue(0), Result);
    if (Node->getNumValues() == 2)  // If this has a flag result, remember it.
1009 AddLegalizedOperand(Op.getValue(1), Result.getValue(1)); 1010 1011 // Now that the callseq_start and all of the non-call nodes above this call 1012 // sequence have been legalized, legalize the call itself. During this 1013 // process, no libcalls can/will be inserted, guaranteeing that no calls 1014 // can overlap. 1015 assert(!IsLegalizingCall && "Inconsistent sequentialization of calls!"); 1016 // Note that we are selecting this call! 1017 LastCALLSEQ_END = SDValue(CallEnd, 0); 1018 IsLegalizingCall = true; 1019 1020 // Legalize the call, starting from the CALLSEQ_END. 1021 LegalizeOp(LastCALLSEQ_END); 1022 assert(!IsLegalizingCall && "CALLSEQ_END should have cleared this!"); 1023 return Result; 1024 } 1025 case ISD::CALLSEQ_END: 1026 // If the CALLSEQ_START node hasn't been legalized first, legalize it. This 1027 // will cause this node to be legalized as well as handling libcalls right. 1028 if (LastCALLSEQ_END.getNode() != Node) { 1029 LegalizeOp(SDValue(FindCallStartFromCallEnd(Node), 0)); 1030 DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op); 1031 assert(I != LegalizedNodes.end() && 1032 "Legalizing the call start should have legalized this node!"); 1033 return I->second; 1034 } 1035 1036 // Otherwise, the call start has been legalized and everything is going 1037 // according to plan. Just legalize ourselves normally here. 1038 Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the chain. 1039 // Do not try to legalize the target-specific arguments (#1+), except for 1040 // an optional flag input. 1041 if (Node->getOperand(Node->getNumOperands()-1).getValueType() != MVT::Flag){ 1042 if (Tmp1 != Node->getOperand(0)) { 1043 SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end()); 1044 Ops[0] = Tmp1; 1045 Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size()); 1046 } 1047 } else { 1048 Tmp2 = LegalizeOp(Node->getOperand(Node->getNumOperands()-1)); 1049 if (Tmp1 != Node->getOperand(0) || 1050 Tmp2 != Node->getOperand(Node->getNumOperands()-1)) { 1051 SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end()); 1052 Ops[0] = Tmp1; 1053 Ops.back() = Tmp2; 1054 Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size()); 1055 } 1056 } 1057 assert(IsLegalizingCall && "Call sequence imbalance between start/end?"); 1058 // This finishes up call legalization. 1059 IsLegalizingCall = false; 1060 1061 // If the CALLSEQ_END node has a flag, remember that we legalized it. 1062 AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0)); 1063 if (Node->getNumValues() == 2) 1064 AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1)); 1065 return Result.getValue(Op.getResNo()); 1066 case ISD::LOAD: { 1067 LoadSDNode *LD = cast<LoadSDNode>(Node); 1068 Tmp1 = LegalizeOp(LD->getChain()); // Legalize the chain. 1069 Tmp2 = LegalizeOp(LD->getBasePtr()); // Legalize the base pointer. 1070 1071 ISD::LoadExtType ExtType = LD->getExtensionType(); 1072 if (ExtType == ISD::NON_EXTLOAD) { 1073 EVT VT = Node->getValueType(0); 1074 Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset()); 1075 Tmp3 = Result.getValue(0); 1076 Tmp4 = Result.getValue(1); 1077 1078 switch (TLI.getOperationAction(Node->getOpcode(), VT)) { 1079 default: llvm_unreachable("This action is not supported yet!"); 1080 case TargetLowering::Legal: 1081 // If this is an unaligned load and the target doesn't support it, 1082 // expand it. 
1083 if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { 1084 const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1085 unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty); 1086 if (LD->getAlignment() < ABIAlignment){ 1087 Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()), 1088 DAG, TLI); 1089 Tmp3 = Result.getOperand(0); 1090 Tmp4 = Result.getOperand(1); 1091 Tmp3 = LegalizeOp(Tmp3); 1092 Tmp4 = LegalizeOp(Tmp4); 1093 } 1094 } 1095 break; 1096 case TargetLowering::Custom: 1097 Tmp1 = TLI.LowerOperation(Tmp3, DAG); 1098 if (Tmp1.getNode()) { 1099 Tmp3 = LegalizeOp(Tmp1); 1100 Tmp4 = LegalizeOp(Tmp1.getValue(1)); 1101 } 1102 break; 1103 case TargetLowering::Promote: { 1104 // Only promote a load of vector type to another. 1105 assert(VT.isVector() && "Cannot promote this load!"); 1106 // Change base type to a different vector type. 1107 EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT); 1108 1109 Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getSrcValue(), 1110 LD->getSrcValueOffset(), 1111 LD->isVolatile(), LD->getAlignment()); 1112 Tmp3 = LegalizeOp(DAG.getNode(ISD::BIT_CONVERT, dl, VT, Tmp1)); 1113 Tmp4 = LegalizeOp(Tmp1.getValue(1)); 1114 break; 1115 } 1116 } 1117 // Since loads produce two values, make sure to remember that we 1118 // legalized both of them. 1119 AddLegalizedOperand(SDValue(Node, 0), Tmp3); 1120 AddLegalizedOperand(SDValue(Node, 1), Tmp4); 1121 return Op.getResNo() ? Tmp4 : Tmp3; 1122 } else { 1123 EVT SrcVT = LD->getMemoryVT(); 1124 unsigned SrcWidth = SrcVT.getSizeInBits(); 1125 int SVOffset = LD->getSrcValueOffset(); 1126 unsigned Alignment = LD->getAlignment(); 1127 bool isVolatile = LD->isVolatile(); 1128 1129 if (SrcWidth != SrcVT.getStoreSizeInBits() && 1130 // Some targets pretend to have an i1 loading operation, and actually 1131 // load an i8. This trick is correct for ZEXTLOAD because the top 7 1132 // bits are guaranteed to be zero; it helps the optimizers understand 1133 // that these bits are zero. It is also useful for EXTLOAD, since it 1134 // tells the optimizers that those bits are undefined. It would be 1135 // nice to have an effective generic way of getting these benefits... 1136 // Until such a way is found, don't insist on promoting i1 here. 1137 (SrcVT != MVT::i1 || 1138 TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) { 1139 // Promote to a byte-sized load if not loading an integral number of 1140 // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24. 1141 unsigned NewWidth = SrcVT.getStoreSizeInBits(); 1142 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth); 1143 SDValue Ch; 1144 1145 // The extra bits are guaranteed to be zero, since we stored them that 1146 // way. A zext load from NVT thus automatically gives zext from SrcVT. 1147 1148 ISD::LoadExtType NewExtType = 1149 ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD; 1150 1151 Result = DAG.getExtLoad(NewExtType, dl, Node->getValueType(0), 1152 Tmp1, Tmp2, LD->getSrcValue(), SVOffset, 1153 NVT, isVolatile, Alignment); 1154 1155 Ch = Result.getValue(1); // The chain. 1156 1157 if (ExtType == ISD::SEXTLOAD) 1158 // Having the top bits zero doesn't help when sign extending. 1159 Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, 1160 Result.getValueType(), 1161 Result, DAG.getValueType(SrcVT)); 1162 else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType()) 1163 // All the top bits are guaranteed to be zero - inform the optimizers. 
1164 Result = DAG.getNode(ISD::AssertZext, dl, 1165 Result.getValueType(), Result, 1166 DAG.getValueType(SrcVT)); 1167 1168 Tmp1 = LegalizeOp(Result); 1169 Tmp2 = LegalizeOp(Ch); 1170 } else if (SrcWidth & (SrcWidth - 1)) { 1171 // If not loading a power-of-2 number of bits, expand as two loads. 1172 assert(SrcVT.isExtended() && !SrcVT.isVector() && 1173 "Unsupported extload!"); 1174 unsigned RoundWidth = 1 << Log2_32(SrcWidth); 1175 assert(RoundWidth < SrcWidth); 1176 unsigned ExtraWidth = SrcWidth - RoundWidth; 1177 assert(ExtraWidth < RoundWidth); 1178 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && 1179 "Load size not an integral number of bytes!"); 1180 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth); 1181 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth); 1182 SDValue Lo, Hi, Ch; 1183 unsigned IncrementSize; 1184 1185 if (TLI.isLittleEndian()) { 1186 // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16) 1187 // Load the bottom RoundWidth bits. 1188 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, 1189 Node->getValueType(0), Tmp1, Tmp2, 1190 LD->getSrcValue(), SVOffset, RoundVT, isVolatile, 1191 Alignment); 1192 1193 // Load the remaining ExtraWidth bits. 1194 IncrementSize = RoundWidth / 8; 1195 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1196 DAG.getIntPtrConstant(IncrementSize)); 1197 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2, 1198 LD->getSrcValue(), SVOffset + IncrementSize, 1199 ExtraVT, isVolatile, 1200 MinAlign(Alignment, IncrementSize)); 1201 1202 // Build a factor node to remember that this load is independent of the 1203 // other one. 1204 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 1205 Hi.getValue(1)); 1206 1207 // Move the top bits to the right place. 1208 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, 1209 DAG.getConstant(RoundWidth, TLI.getShiftAmountTy())); 1210 1211 // Join the hi and lo parts. 1212 Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); 1213 } else { 1214 // Big endian - avoid unaligned loads. 1215 // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8 1216 // Load the top RoundWidth bits. 1217 Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2, 1218 LD->getSrcValue(), SVOffset, RoundVT, isVolatile, 1219 Alignment); 1220 1221 // Load the remaining ExtraWidth bits. 1222 IncrementSize = RoundWidth / 8; 1223 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1224 DAG.getIntPtrConstant(IncrementSize)); 1225 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, 1226 Node->getValueType(0), Tmp1, Tmp2, 1227 LD->getSrcValue(), SVOffset + IncrementSize, 1228 ExtraVT, isVolatile, 1229 MinAlign(Alignment, IncrementSize)); 1230 1231 // Build a factor node to remember that this load is independent of the 1232 // other one. 1233 Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 1234 Hi.getValue(1)); 1235 1236 // Move the top bits to the right place. 1237 Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi, 1238 DAG.getConstant(ExtraWidth, TLI.getShiftAmountTy())); 1239 1240 // Join the hi and lo parts. 
1241 Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi); 1242 } 1243 1244 Tmp1 = LegalizeOp(Result); 1245 Tmp2 = LegalizeOp(Ch); 1246 } else { 1247 switch (TLI.getLoadExtAction(ExtType, SrcVT)) { 1248 default: llvm_unreachable("This action is not supported yet!"); 1249 case TargetLowering::Custom: 1250 isCustom = true; 1251 // FALLTHROUGH 1252 case TargetLowering::Legal: 1253 Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset()); 1254 Tmp1 = Result.getValue(0); 1255 Tmp2 = Result.getValue(1); 1256 1257 if (isCustom) { 1258 Tmp3 = TLI.LowerOperation(Result, DAG); 1259 if (Tmp3.getNode()) { 1260 Tmp1 = LegalizeOp(Tmp3); 1261 Tmp2 = LegalizeOp(Tmp3.getValue(1)); 1262 } 1263 } else { 1264 // If this is an unaligned load and the target doesn't support it, 1265 // expand it. 1266 if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { 1267 const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1268 unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty); 1269 if (LD->getAlignment() < ABIAlignment){ 1270 Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()), 1271 DAG, TLI); 1272 Tmp1 = Result.getOperand(0); 1273 Tmp2 = Result.getOperand(1); 1274 Tmp1 = LegalizeOp(Tmp1); 1275 Tmp2 = LegalizeOp(Tmp2); 1276 } 1277 } 1278 } 1279 break; 1280 case TargetLowering::Expand: 1281 // f64 = EXTLOAD f32 should expand to LOAD, FP_EXTEND 1282 // f128 = EXTLOAD {f32,f64} too 1283 if ((SrcVT == MVT::f32 && (Node->getValueType(0) == MVT::f64 || 1284 Node->getValueType(0) == MVT::f128)) || 1285 (SrcVT == MVT::f64 && Node->getValueType(0) == MVT::f128)) { 1286 SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2, LD->getSrcValue(), 1287 LD->getSrcValueOffset(), 1288 LD->isVolatile(), LD->getAlignment()); 1289 Result = DAG.getNode(ISD::FP_EXTEND, dl, 1290 Node->getValueType(0), Load); 1291 Tmp1 = LegalizeOp(Result); // Relegalize new nodes. 1292 Tmp2 = LegalizeOp(Load.getValue(1)); 1293 break; 1294 } 1295 assert(ExtType != ISD::EXTLOAD &&"EXTLOAD should always be supported!"); 1296 // Turn the unsupported load into an EXTLOAD followed by an explicit 1297 // zero/sign extend inreg. 1298 Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0), 1299 Tmp1, Tmp2, LD->getSrcValue(), 1300 LD->getSrcValueOffset(), SrcVT, 1301 LD->isVolatile(), LD->getAlignment()); 1302 SDValue ValRes; 1303 if (ExtType == ISD::SEXTLOAD) 1304 ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, 1305 Result.getValueType(), 1306 Result, DAG.getValueType(SrcVT)); 1307 else 1308 ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT); 1309 Tmp1 = LegalizeOp(ValRes); // Relegalize new nodes. 1310 Tmp2 = LegalizeOp(Result.getValue(1)); // Relegalize new nodes. 1311 break; 1312 } 1313 } 1314 1315 // Since loads produce two values, make sure to remember that we legalized 1316 // both of them. 1317 AddLegalizedOperand(SDValue(Node, 0), Tmp1); 1318 AddLegalizedOperand(SDValue(Node, 1), Tmp2); 1319 return Op.getResNo() ? Tmp2 : Tmp1; 1320 } 1321 } 1322 case ISD::STORE: { 1323 StoreSDNode *ST = cast<StoreSDNode>(Node); 1324 Tmp1 = LegalizeOp(ST->getChain()); // Legalize the chain. 1325 Tmp2 = LegalizeOp(ST->getBasePtr()); // Legalize the pointer. 
1326 int SVOffset = ST->getSrcValueOffset(); 1327 unsigned Alignment = ST->getAlignment(); 1328 bool isVolatile = ST->isVolatile(); 1329 1330 if (!ST->isTruncatingStore()) { 1331 if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) { 1332 Result = SDValue(OptStore, 0); 1333 break; 1334 } 1335 1336 { 1337 Tmp3 = LegalizeOp(ST->getValue()); 1338 Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2, 1339 ST->getOffset()); 1340 1341 EVT VT = Tmp3.getValueType(); 1342 switch (TLI.getOperationAction(ISD::STORE, VT)) { 1343 default: llvm_unreachable("This action is not supported yet!"); 1344 case TargetLowering::Legal: 1345 // If this is an unaligned store and the target doesn't support it, 1346 // expand it. 1347 if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { 1348 const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1349 unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty); 1350 if (ST->getAlignment() < ABIAlignment) 1351 Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()), 1352 DAG, TLI); 1353 } 1354 break; 1355 case TargetLowering::Custom: 1356 Tmp1 = TLI.LowerOperation(Result, DAG); 1357 if (Tmp1.getNode()) Result = Tmp1; 1358 break; 1359 case TargetLowering::Promote: 1360 assert(VT.isVector() && "Unknown legal promote case!"); 1361 Tmp3 = DAG.getNode(ISD::BIT_CONVERT, dl, 1362 TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3); 1363 Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, 1364 ST->getSrcValue(), SVOffset, isVolatile, 1365 Alignment); 1366 break; 1367 } 1368 break; 1369 } 1370 } else { 1371 Tmp3 = LegalizeOp(ST->getValue()); 1372 1373 EVT StVT = ST->getMemoryVT(); 1374 unsigned StWidth = StVT.getSizeInBits(); 1375 1376 if (StWidth != StVT.getStoreSizeInBits()) { 1377 // Promote to a byte-sized store with upper bits zero if not 1378 // storing an integral number of bytes. For example, promote 1379 // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1) 1380 EVT NVT = EVT::getIntegerVT(*DAG.getContext(), StVT.getStoreSizeInBits()); 1381 Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT); 1382 Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(), 1383 SVOffset, NVT, isVolatile, Alignment); 1384 } else if (StWidth & (StWidth - 1)) { 1385 // If not storing a power-of-2 number of bits, expand as two stores. 1386 assert(StVT.isExtended() && !StVT.isVector() && 1387 "Unsupported truncstore!"); 1388 unsigned RoundWidth = 1 << Log2_32(StWidth); 1389 assert(RoundWidth < StWidth); 1390 unsigned ExtraWidth = StWidth - RoundWidth; 1391 assert(ExtraWidth < RoundWidth); 1392 assert(!(RoundWidth % 8) && !(ExtraWidth % 8) && 1393 "Store size not an integral number of bytes!"); 1394 EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth); 1395 EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth); 1396 SDValue Lo, Hi; 1397 unsigned IncrementSize; 1398 1399 if (TLI.isLittleEndian()) { 1400 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16) 1401 // Store the bottom RoundWidth bits. 1402 Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(), 1403 SVOffset, RoundVT, 1404 isVolatile, Alignment); 1405 1406 // Store the remaining ExtraWidth bits. 
1407 IncrementSize = RoundWidth / 8; 1408 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1409 DAG.getIntPtrConstant(IncrementSize)); 1410 Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3, 1411 DAG.getConstant(RoundWidth, TLI.getShiftAmountTy())); 1412 Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(), 1413 SVOffset + IncrementSize, ExtraVT, isVolatile, 1414 MinAlign(Alignment, IncrementSize)); 1415 } else { 1416 // Big endian - avoid unaligned stores. 1417 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X 1418 // Store the top RoundWidth bits. 1419 Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3, 1420 DAG.getConstant(ExtraWidth, TLI.getShiftAmountTy())); 1421 Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(), 1422 SVOffset, RoundVT, isVolatile, Alignment); 1423 1424 // Store the remaining ExtraWidth bits. 1425 IncrementSize = RoundWidth / 8; 1426 Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2, 1427 DAG.getIntPtrConstant(IncrementSize)); 1428 Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(), 1429 SVOffset + IncrementSize, ExtraVT, isVolatile, 1430 MinAlign(Alignment, IncrementSize)); 1431 } 1432 1433 // The order of the stores doesn't matter. 1434 Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi); 1435 } else { 1436 if (Tmp1 != ST->getChain() || Tmp3 != ST->getValue() || 1437 Tmp2 != ST->getBasePtr()) 1438 Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2, 1439 ST->getOffset()); 1440 1441 switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) { 1442 default: llvm_unreachable("This action is not supported yet!"); 1443 case TargetLowering::Legal: 1444 // If this is an unaligned store and the target doesn't support it, 1445 // expand it. 1446 if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { 1447 const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); 1448 unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty); 1449 if (ST->getAlignment() < ABIAlignment) 1450 Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()), 1451 DAG, TLI); 1452 } 1453 break; 1454 case TargetLowering::Custom: 1455 Result = TLI.LowerOperation(Result, DAG); 1456 break; 1457 case Expand: 1458 // TRUNCSTORE:i16 i32 -> STORE i16 1459 assert(isTypeLegal(StVT) && "Do not know how to expand this store!"); 1460 Tmp3 = DAG.getNode(ISD::TRUNCATE, dl, StVT, Tmp3); 1461 Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(), 1462 SVOffset, isVolatile, Alignment); 1463 break; 1464 } 1465 } 1466 } 1467 break; 1468 } 1469 } 1470 assert(Result.getValueType() == Op.getValueType() && 1471 "Bad legalization!"); 1472 1473 // Make sure that the generated code is itself legal. 1474 if (Result != Op) 1475 Result = LegalizeOp(Result); 1476 1477 // Note that LegalizeOp may be reentered even from single-use nodes, which 1478 // means that we always must cache transformed nodes. 1479 AddLegalizedOperand(Op, Result); 1480 return Result; 1481} 1482 1483SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) { 1484 SDValue Vec = Op.getOperand(0); 1485 SDValue Idx = Op.getOperand(1); 1486 DebugLoc dl = Op.getDebugLoc(); 1487 // Store the value to a temporary stack slot, then LOAD the returned part. 1488 SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); 1489 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, NULL, 0); 1490 1491 // Add the offset to the index. 
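  // For example, extracting element 3 of a v4i32 through the stack slot:
  // EltSize is 4, so Idx becomes 3 * 4 = 12 and the result is loaded from
  // StackPtr + 12.  A vector-typed result is re-loaded as a whole vector;
  // a scalar result uses an any-extending load of the element type.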
1492 unsigned EltSize = 1493 Vec.getValueType().getVectorElementType().getSizeInBits()/8; 1494 Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx, 1495 DAG.getConstant(EltSize, Idx.getValueType())); 1496 1497 if (Idx.getValueType().bitsGT(TLI.getPointerTy())) 1498 Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx); 1499 else 1500 Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx); 1501 1502 StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr); 1503 1504 if (Op.getValueType().isVector()) 1505 return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, NULL, 0); 1506 else 1507 return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr, 1508 NULL, 0, Vec.getValueType().getVectorElementType()); 1509} 1510 1511SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) { 1512 // We can't handle this case efficiently. Allocate a sufficiently 1513 // aligned object on the stack, store each element into it, then load 1514 // the result as a vector. 1515 // Create the stack frame object. 1516 EVT VT = Node->getValueType(0); 1517 EVT OpVT = Node->getOperand(0).getValueType(); 1518 DebugLoc dl = Node->getDebugLoc(); 1519 SDValue FIPtr = DAG.CreateStackTemporary(VT); 1520 int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex(); 1521 const Value *SV = PseudoSourceValue::getFixedStack(FI); 1522 1523 // Emit a store of each element to the stack slot. 1524 SmallVector<SDValue, 8> Stores; 1525 unsigned TypeByteSize = OpVT.getSizeInBits() / 8; 1526 // Store (in the right endianness) the elements to memory. 1527 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1528 // Ignore undef elements. 1529 if (Node->getOperand(i).getOpcode() == ISD::UNDEF) continue; 1530 1531 unsigned Offset = TypeByteSize*i; 1532 1533 SDValue Idx = DAG.getConstant(Offset, FIPtr.getValueType()); 1534 Idx = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, Idx); 1535 1536 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, Node->getOperand(i), 1537 Idx, SV, Offset)); 1538 } 1539 1540 SDValue StoreChain; 1541 if (!Stores.empty()) // Not all undef elements? 1542 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 1543 &Stores[0], Stores.size()); 1544 else 1545 StoreChain = DAG.getEntryNode(); 1546 1547 // Result is a load from the stack slot. 1548 return DAG.getLoad(VT, dl, StoreChain, FIPtr, SV, 0); 1549} 1550 1551SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) { 1552 DebugLoc dl = Node->getDebugLoc(); 1553 SDValue Tmp1 = Node->getOperand(0); 1554 SDValue Tmp2 = Node->getOperand(1); 1555 assert((Tmp2.getValueType() == MVT::f32 || 1556 Tmp2.getValueType() == MVT::f64) && 1557 "Ugly special-cased code!"); 1558 // Get the sign bit of the RHS. 1559 SDValue SignBit; 1560 EVT IVT = Tmp2.getValueType() == MVT::f64 ? 
MVT::i64 : MVT::i32; 1561 if (isTypeLegal(IVT)) { 1562 SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, IVT, Tmp2); 1563 } else { 1564 assert(isTypeLegal(TLI.getPointerTy()) && 1565 (TLI.getPointerTy() == MVT::i32 || 1566 TLI.getPointerTy() == MVT::i64) && 1567 "Legal type for load?!"); 1568 SDValue StackPtr = DAG.CreateStackTemporary(Tmp2.getValueType()); 1569 SDValue StorePtr = StackPtr, LoadPtr = StackPtr; 1570 SDValue Ch = 1571 DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StorePtr, NULL, 0); 1572 if (Tmp2.getValueType() == MVT::f64 && TLI.isLittleEndian()) 1573 LoadPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), 1574 LoadPtr, DAG.getIntPtrConstant(4)); 1575 SignBit = DAG.getExtLoad(ISD::SEXTLOAD, dl, TLI.getPointerTy(), 1576 Ch, LoadPtr, NULL, 0, MVT::i32); 1577 } 1578 SignBit = 1579 DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()), 1580 SignBit, DAG.getConstant(0, SignBit.getValueType()), 1581 ISD::SETLT); 1582 // Get the absolute value of the result. 1583 SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1); 1584 // Select between the nabs and abs value based on the sign bit of 1585 // the input. 1586 return DAG.getNode(ISD::SELECT, dl, AbsVal.getValueType(), SignBit, 1587 DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal), 1588 AbsVal); 1589} 1590 1591SDValue SelectionDAGLegalize::ExpandDBG_STOPPOINT(SDNode* Node) { 1592 DebugLoc dl = Node->getDebugLoc(); 1593 DwarfWriter *DW = DAG.getDwarfWriter(); 1594 bool useDEBUG_LOC = TLI.isOperationLegalOrCustom(ISD::DEBUG_LOC, 1595 MVT::Other); 1596 bool useLABEL = TLI.isOperationLegalOrCustom(ISD::DBG_LABEL, MVT::Other); 1597 1598 const DbgStopPointSDNode *DSP = cast<DbgStopPointSDNode>(Node); 1599 MDNode *CU_Node = DSP->getCompileUnit(); 1600 if (DW && (useDEBUG_LOC || useLABEL)) { 1601 1602 unsigned Line = DSP->getLine(); 1603 unsigned Col = DSP->getColumn(); 1604 1605 if (OptLevel == CodeGenOpt::None) { 1606 // A bit self-referential to have DebugLoc on Debug_Loc nodes, but it 1607 // won't hurt anything. 1608 if (useDEBUG_LOC) { 1609 return DAG.getNode(ISD::DEBUG_LOC, dl, MVT::Other, Node->getOperand(0), 1610 DAG.getConstant(Line, MVT::i32), 1611 DAG.getConstant(Col, MVT::i32), 1612 DAG.getSrcValue(CU_Node)); 1613 } else { 1614 unsigned ID = DW->RecordSourceLine(Line, Col, CU_Node); 1615 return DAG.getLabel(ISD::DBG_LABEL, dl, Node->getOperand(0), ID); 1616 } 1617 } 1618 } 1619 return Node->getOperand(0); 1620} 1621 1622void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node, 1623 SmallVectorImpl<SDValue> &Results) { 1624 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore(); 1625 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and" 1626 " not tell us which reg is the stack pointer!"); 1627 DebugLoc dl = Node->getDebugLoc(); 1628 EVT VT = Node->getValueType(0); 1629 SDValue Tmp1 = SDValue(Node, 0); 1630 SDValue Tmp2 = SDValue(Node, 1); 1631 SDValue Tmp3 = Node->getOperand(2); 1632 SDValue Chain = Tmp1.getOperand(0); 1633 1634 // Chain the dynamic stack allocation so that it doesn't modify the stack 1635 // pointer when other instructions are using the stack. 
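  // In outline, the expansion below produces:
  //   callseq_start
  //   SP = copyfromreg <stack pointer>
  //   if (Align > StackAlign)  SP = SP & -Align   // extra alignment requested
  //   Result = SP - Size
  //   copytoreg <stack pointer>, Result
  //   callseq_end
  // so the allocated block begins at the new, suitably aligned stack pointer.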
1636 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true)); 1637 1638 SDValue Size = Tmp2.getOperand(1); 1639 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); 1640 Chain = SP.getValue(1); 1641 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue(); 1642 unsigned StackAlign = 1643 TLI.getTargetMachine().getFrameInfo()->getStackAlignment(); 1644 if (Align > StackAlign) 1645 SP = DAG.getNode(ISD::AND, dl, VT, SP, 1646 DAG.getConstant(-(uint64_t)Align, VT)); 1647 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value 1648 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain 1649 1650 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true), 1651 DAG.getIntPtrConstant(0, true), SDValue()); 1652 1653 Results.push_back(Tmp1); 1654 Results.push_back(Tmp2); 1655} 1656 1657/// LegalizeSetCCCondCode - Legalize a SETCC with given LHS and RHS and 1658/// condition code CC on the current target. This routine expands SETCC with 1659/// illegal condition code into AND / OR of multiple SETCC values. 1660void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, 1661 SDValue &LHS, SDValue &RHS, 1662 SDValue &CC, 1663 DebugLoc dl) { 1664 EVT OpVT = LHS.getValueType(); 1665 ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get(); 1666 switch (TLI.getCondCodeAction(CCCode, OpVT)) { 1667 default: llvm_unreachable("Unknown condition code action!"); 1668 case TargetLowering::Legal: 1669 // Nothing to do. 1670 break; 1671 case TargetLowering::Expand: { 1672 ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID; 1673 unsigned Opc = 0; 1674 switch (CCCode) { 1675 default: llvm_unreachable("Don't know how to expand this condition!"); 1676 case ISD::SETOEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETO; Opc = ISD::AND; break; 1677 case ISD::SETOGT: CC1 = ISD::SETGT; CC2 = ISD::SETO; Opc = ISD::AND; break; 1678 case ISD::SETOGE: CC1 = ISD::SETGE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1679 case ISD::SETOLT: CC1 = ISD::SETLT; CC2 = ISD::SETO; Opc = ISD::AND; break; 1680 case ISD::SETOLE: CC1 = ISD::SETLE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1681 case ISD::SETONE: CC1 = ISD::SETNE; CC2 = ISD::SETO; Opc = ISD::AND; break; 1682 case ISD::SETUEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1683 case ISD::SETUGT: CC1 = ISD::SETGT; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1684 case ISD::SETUGE: CC1 = ISD::SETGE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1685 case ISD::SETULT: CC1 = ISD::SETLT; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1686 case ISD::SETULE: CC1 = ISD::SETLE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1687 case ISD::SETUNE: CC1 = ISD::SETNE; CC2 = ISD::SETUO; Opc = ISD::OR; break; 1688 // FIXME: Implement more expansions. 1689 } 1690 1691 SDValue SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1); 1692 SDValue SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2); 1693 LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2); 1694 RHS = SDValue(); 1695 CC = SDValue(); 1696 break; 1697 } 1698 } 1699} 1700 1701/// EmitStackConvert - Emit a store/load combination to the stack. This stores 1702/// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does 1703/// a load from the stack slot to DestVT, extending it if needed. 1704/// The resultant code need not be legal. 1705SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, 1706 EVT SlotVT, 1707 EVT DestVT, 1708 DebugLoc dl) { 1709 // Create the stack frame object. 1710 unsigned SrcAlign = 1711 TLI.getTargetData()->getPrefTypeAlignment(SrcOp.getValueType(). 
1712 getTypeForEVT(*DAG.getContext())); 1713 SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign); 1714 1715 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr); 1716 int SPFI = StackPtrFI->getIndex(); 1717 const Value *SV = PseudoSourceValue::getFixedStack(SPFI); 1718 1719 unsigned SrcSize = SrcOp.getValueType().getSizeInBits(); 1720 unsigned SlotSize = SlotVT.getSizeInBits(); 1721 unsigned DestSize = DestVT.getSizeInBits(); 1722 unsigned DestAlign = 1723 TLI.getTargetData()->getPrefTypeAlignment(DestVT.getTypeForEVT(*DAG.getContext())); 1724 1725 // Emit a store to the stack slot. Use a truncstore if the input value is 1726 // later than DestVT. 1727 SDValue Store; 1728 1729 if (SrcSize > SlotSize) 1730 Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, 1731 SV, 0, SlotVT, false, SrcAlign); 1732 else { 1733 assert(SrcSize == SlotSize && "Invalid store"); 1734 Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr, 1735 SV, 0, false, SrcAlign); 1736 } 1737 1738 // Result is a load from the stack slot. 1739 if (SlotSize == DestSize) 1740 return DAG.getLoad(DestVT, dl, Store, FIPtr, SV, 0, false, DestAlign); 1741 1742 assert(SlotSize < DestSize && "Unknown extension!"); 1743 return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, SV, 0, SlotVT, 1744 false, DestAlign); 1745} 1746 1747SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) { 1748 DebugLoc dl = Node->getDebugLoc(); 1749 // Create a vector sized/aligned stack slot, store the value to element #0, 1750 // then load the whole vector back out. 1751 SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0)); 1752 1753 FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr); 1754 int SPFI = StackPtrFI->getIndex(); 1755 1756 SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0), 1757 StackPtr, 1758 PseudoSourceValue::getFixedStack(SPFI), 0, 1759 Node->getValueType(0).getVectorElementType()); 1760 return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr, 1761 PseudoSourceValue::getFixedStack(SPFI), 0); 1762} 1763 1764 1765/// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't 1766/// support the operation, but do support the resultant vector type. 1767SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { 1768 unsigned NumElems = Node->getNumOperands(); 1769 SDValue Value1, Value2; 1770 DebugLoc dl = Node->getDebugLoc(); 1771 EVT VT = Node->getValueType(0); 1772 EVT OpVT = Node->getOperand(0).getValueType(); 1773 EVT EltVT = VT.getVectorElementType(); 1774 1775 // If the only non-undef value is the low element, turn this into a 1776 // SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X. 
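  // A few concrete cases of the classification below, for a vector of N
  // elements:
  //   { X, undef, undef, undef }  -> SCALAR_TO_VECTOR X
  //   all-constant operands       -> load of a constant-pool vector
  //   { X, Y, X, Y }              -> shuffle of SCALAR_TO_VECTOR X and
  //                                  SCALAR_TO_VECTOR Y with mask <0,N,0,N>,
  //                                  if the target accepts that mask
  //   anything else               -> element-by-element stores to a stack
  //                                  slot, followed by a vector reload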
1777 bool isOnlyLowElement = true; 1778 bool MoreThanTwoValues = false; 1779 bool isConstant = true; 1780 for (unsigned i = 0; i < NumElems; ++i) { 1781 SDValue V = Node->getOperand(i); 1782 if (V.getOpcode() == ISD::UNDEF) 1783 continue; 1784 if (i > 0) 1785 isOnlyLowElement = false; 1786 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 1787 isConstant = false; 1788 1789 if (!Value1.getNode()) { 1790 Value1 = V; 1791 } else if (!Value2.getNode()) { 1792 if (V != Value1) 1793 Value2 = V; 1794 } else if (V != Value1 && V != Value2) { 1795 MoreThanTwoValues = true; 1796 } 1797 } 1798 1799 if (!Value1.getNode()) 1800 return DAG.getUNDEF(VT); 1801 1802 if (isOnlyLowElement) 1803 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0)); 1804 1805 // If all elements are constants, create a load from the constant pool. 1806 if (isConstant) { 1807 std::vector<Constant*> CV; 1808 for (unsigned i = 0, e = NumElems; i != e; ++i) { 1809 if (ConstantFPSDNode *V = 1810 dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) { 1811 CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue())); 1812 } else if (ConstantSDNode *V = 1813 dyn_cast<ConstantSDNode>(Node->getOperand(i))) { 1814 CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue())); 1815 } else { 1816 assert(Node->getOperand(i).getOpcode() == ISD::UNDEF); 1817 const Type *OpNTy = OpVT.getTypeForEVT(*DAG.getContext()); 1818 CV.push_back(UndefValue::get(OpNTy)); 1819 } 1820 } 1821 Constant *CP = ConstantVector::get(CV); 1822 SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy()); 1823 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 1824 return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, 1825 PseudoSourceValue::getConstantPool(), 0, 1826 false, Alignment); 1827 } 1828 1829 if (!MoreThanTwoValues) { 1830 SmallVector<int, 8> ShuffleVec(NumElems, -1); 1831 for (unsigned i = 0; i < NumElems; ++i) { 1832 SDValue V = Node->getOperand(i); 1833 if (V.getOpcode() == ISD::UNDEF) 1834 continue; 1835 ShuffleVec[i] = V == Value1 ? 0 : NumElems; 1836 } 1837 if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) { 1838 // Get the splatted value into the low element of a vector register. 1839 SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1); 1840 SDValue Vec2; 1841 if (Value2.getNode()) 1842 Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2); 1843 else 1844 Vec2 = DAG.getUNDEF(VT); 1845 1846 // Return shuffle(LowValVec, undef, <0,0,0,0>) 1847 return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data()); 1848 } 1849 } 1850 1851 // Otherwise, we can't handle this case efficiently. 1852 return ExpandVectorBuildThroughStack(Node); 1853} 1854 1855// ExpandLibCall - Expand a node into a call to a libcall. If the result value 1856// does not fit into a register, return the lo part and set the hi part to the 1857// by-reg argument. If it does fit into a single register, return the result 1858// and leave the Hi part unset. 1859SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, 1860 bool isSigned) { 1861 assert(!IsLegalizingCall && "Cannot overlap legalization of calls!"); 1862 // The input chain to this libcall is the entry node of the function. 1863 // Legalizing the call will automatically add the previous call to the 1864 // dependence. 
1865 SDValue InChain = DAG.getEntryNode(); 1866 1867 TargetLowering::ArgListTy Args; 1868 TargetLowering::ArgListEntry Entry; 1869 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { 1870 EVT ArgVT = Node->getOperand(i).getValueType(); 1871 const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 1872 Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 1873 Entry.isSExt = isSigned; 1874 Entry.isZExt = !isSigned; 1875 Args.push_back(Entry); 1876 } 1877 SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), 1878 TLI.getPointerTy()); 1879 1880 // Splice the libcall in wherever FindInputOutputChains tells us to. 1881 const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext()); 1882 std::pair<SDValue, SDValue> CallInfo = 1883 TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 1884 0, TLI.getLibcallCallingConv(LC), false, 1885 /*isReturnValueUsed=*/true, 1886 Callee, Args, DAG, 1887 Node->getDebugLoc()); 1888 1889 // Legalize the call sequence, starting with the chain. This will advance 1890 // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that 1891 // was added by LowerCallTo (guaranteeing proper serialization of calls). 1892 LegalizeOp(CallInfo.second); 1893 return CallInfo.first; 1894} 1895 1896SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node, 1897 RTLIB::Libcall Call_F32, 1898 RTLIB::Libcall Call_F64, 1899 RTLIB::Libcall Call_F80, 1900 RTLIB::Libcall Call_PPCF128) { 1901 RTLIB::Libcall LC; 1902 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1903 default: llvm_unreachable("Unexpected request for libcall!"); 1904 case MVT::f32: LC = Call_F32; break; 1905 case MVT::f64: LC = Call_F64; break; 1906 case MVT::f80: LC = Call_F80; break; 1907 case MVT::ppcf128: LC = Call_PPCF128; break; 1908 } 1909 return ExpandLibCall(LC, Node, false); 1910} 1911 1912SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned, 1913 RTLIB::Libcall Call_I16, 1914 RTLIB::Libcall Call_I32, 1915 RTLIB::Libcall Call_I64, 1916 RTLIB::Libcall Call_I128) { 1917 RTLIB::Libcall LC; 1918 switch (Node->getValueType(0).getSimpleVT().SimpleTy) { 1919 default: llvm_unreachable("Unexpected request for libcall!"); 1920 case MVT::i16: LC = Call_I16; break; 1921 case MVT::i32: LC = Call_I32; break; 1922 case MVT::i64: LC = Call_I64; break; 1923 case MVT::i128: LC = Call_I128; break; 1924 } 1925 return ExpandLibCall(LC, Node, isSigned); 1926} 1927 1928/// ExpandLegalINT_TO_FP - This function is responsible for legalizing a 1929/// INT_TO_FP operation of the specified operand when the target requests that 1930/// we expand it. At this point, we know that the result and operand types are 1931/// legal for the target. 1932SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, 1933 SDValue Op0, 1934 EVT DestVT, 1935 DebugLoc dl) { 1936 if (Op0.getValueType() == MVT::i32) { 1937 // simple 32-bit [signed|unsigned] integer to float/double expansion 1938 1939 // Get the stack frame index of a 8 byte buffer. 
1940 SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64); 1941 1942 // word offset constant for Hi/Lo address computation 1943 SDValue WordOff = DAG.getConstant(sizeof(int), TLI.getPointerTy()); 1944 // set up Hi and Lo (into buffer) address based on endian 1945 SDValue Hi = StackSlot; 1946 SDValue Lo = DAG.getNode(ISD::ADD, dl, 1947 TLI.getPointerTy(), StackSlot, WordOff); 1948 if (TLI.isLittleEndian()) 1949 std::swap(Hi, Lo); 1950 1951 // if signed map to unsigned space 1952 SDValue Op0Mapped; 1953 if (isSigned) { 1954 // constant used to invert sign bit (signed to unsigned mapping) 1955 SDValue SignBit = DAG.getConstant(0x80000000u, MVT::i32); 1956 Op0Mapped = DAG.getNode(ISD::XOR, dl, MVT::i32, Op0, SignBit); 1957 } else { 1958 Op0Mapped = Op0; 1959 } 1960 // store the lo of the constructed double - based on integer input 1961 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, 1962 Op0Mapped, Lo, NULL, 0); 1963 // initial hi portion of constructed double 1964 SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32); 1965 // store the hi of the constructed double - biased exponent 1966 SDValue Store2=DAG.getStore(Store1, dl, InitialHi, Hi, NULL, 0); 1967 // load the constructed double 1968 SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot, NULL, 0); 1969 // FP constant to bias correct the final result 1970 SDValue Bias = DAG.getConstantFP(isSigned ? 1971 BitsToDouble(0x4330000080000000ULL) : 1972 BitsToDouble(0x4330000000000000ULL), 1973 MVT::f64); 1974 // subtract the bias 1975 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias); 1976 // final result 1977 SDValue Result; 1978 // handle final rounding 1979 if (DestVT == MVT::f64) { 1980 // do nothing 1981 Result = Sub; 1982 } else if (DestVT.bitsLT(MVT::f64)) { 1983 Result = DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub, 1984 DAG.getIntPtrConstant(0)); 1985 } else if (DestVT.bitsGT(MVT::f64)) { 1986 Result = DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub); 1987 } 1988 return Result; 1989 } 1990 assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet"); 1991 SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0); 1992 1993 SDValue SignSet = DAG.getSetCC(dl, TLI.getSetCCResultType(Op0.getValueType()), 1994 Op0, DAG.getConstant(0, Op0.getValueType()), 1995 ISD::SETLT); 1996 SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4); 1997 SDValue CstOffset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), 1998 SignSet, Four, Zero); 1999 2000 // If the sign bit of the integer is set, the large number will be treated 2001 // as a negative number. To counteract this, the dynamic code adds an 2002 // offset depending on the data type. 
2003 uint64_t FF; 2004 switch (Op0.getValueType().getSimpleVT().SimpleTy) { 2005 default: llvm_unreachable("Unsupported integer type!"); 2006 case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float) 2007 case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float) 2008 case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float) 2009 case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float) 2010 } 2011 if (TLI.isLittleEndian()) FF <<= 32; 2012 Constant *FudgeFactor = ConstantInt::get( 2013 Type::getInt64Ty(*DAG.getContext()), FF); 2014 2015 SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy()); 2016 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 2017 CPIdx = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), CPIdx, CstOffset); 2018 Alignment = std::min(Alignment, 4u); 2019 SDValue FudgeInReg; 2020 if (DestVT == MVT::f32) 2021 FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx, 2022 PseudoSourceValue::getConstantPool(), 0, 2023 false, Alignment); 2024 else { 2025 FudgeInReg = 2026 LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, 2027 DAG.getEntryNode(), CPIdx, 2028 PseudoSourceValue::getConstantPool(), 0, 2029 MVT::f32, false, Alignment)); 2030 } 2031 2032 return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg); 2033} 2034 2035/// PromoteLegalINT_TO_FP - This function is responsible for legalizing a 2036/// *INT_TO_FP operation of the specified operand when the target requests that 2037/// we promote it. At this point, we know that the result and operand types are 2038/// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP 2039/// operation that takes a larger input. 2040SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp, 2041 EVT DestVT, 2042 bool isSigned, 2043 DebugLoc dl) { 2044 // First step, figure out the appropriate *INT_TO_FP operation to use. 2045 EVT NewInTy = LegalOp.getValueType(); 2046 2047 unsigned OpToUse = 0; 2048 2049 // Scan for the appropriate larger type to use. 2050 while (1) { 2051 NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1); 2052 assert(NewInTy.isInteger() && "Ran out of possibilities!"); 2053 2054 // If the target supports SINT_TO_FP of this type, use it. 2055 if (TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, NewInTy)) { 2056 OpToUse = ISD::SINT_TO_FP; 2057 break; 2058 } 2059 if (isSigned) continue; 2060 2061 // If the target supports UINT_TO_FP of this type, use it. 2062 if (TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, NewInTy)) { 2063 OpToUse = ISD::UINT_TO_FP; 2064 break; 2065 } 2066 2067 // Otherwise, try a larger type. 2068 } 2069 2070 // Okay, we found the operation and type to use. Zero extend our input to the 2071 // desired type then run the operation on it. 2072 return DAG.getNode(OpToUse, dl, DestVT, 2073 DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, 2074 dl, NewInTy, LegalOp)); 2075} 2076 2077/// PromoteLegalFP_TO_INT - This function is responsible for legalizing a 2078/// FP_TO_*INT operation of the specified operand when the target requests that 2079/// we promote it. At this point, we know that the result and operand types are 2080/// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT 2081/// operation that returns a larger result. 2082SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp, 2083 EVT DestVT, 2084 bool isSigned, 2085 DebugLoc dl) { 2086 // First step, figure out the appropriate FP_TO*INT operation to use. 
2087 EVT NewOutTy = DestVT; 2088 2089 unsigned OpToUse = 0; 2090 2091 // Scan for the appropriate larger type to use. 2092 while (1) { 2093 NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1); 2094 assert(NewOutTy.isInteger() && "Ran out of possibilities!"); 2095 2096 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) { 2097 OpToUse = ISD::FP_TO_SINT; 2098 break; 2099 } 2100 2101 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) { 2102 OpToUse = ISD::FP_TO_UINT; 2103 break; 2104 } 2105 2106 // Otherwise, try a larger type. 2107 } 2108 2109 2110 // Okay, we found the operation and type to use. 2111 SDValue Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp); 2112 2113 // Truncate the result of the extended FP_TO_*INT operation to the desired 2114 // size. 2115 return DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation); 2116} 2117 2118/// ExpandBSWAP - Open code the operations for BSWAP of the specified operation. 2119/// 2120SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) { 2121 EVT VT = Op.getValueType(); 2122 EVT SHVT = TLI.getShiftAmountTy(); 2123 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; 2124 switch (VT.getSimpleVT().SimpleTy) { 2125 default: llvm_unreachable("Unhandled Expand type in BSWAP!"); 2126 case MVT::i16: 2127 Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2128 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2129 return DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 2130 case MVT::i32: 2131 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2132 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2133 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2134 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2135 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(0xFF0000, VT)); 2136 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, VT)); 2137 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2138 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2139 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 2140 case MVT::i64: 2141 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2142 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2143 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2144 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2145 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT)); 2146 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT)); 2147 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, SHVT)); 2148 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, SHVT)); 2149 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, DAG.getConstant(255ULL<<48, VT)); 2150 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, DAG.getConstant(255ULL<<40, VT)); 2151 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, DAG.getConstant(255ULL<<32, VT)); 2152 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, DAG.getConstant(255ULL<<24, VT)); 2153 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(255ULL<<16, VT)); 2154 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(255ULL<<8 , VT)); 2155 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7); 2156 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5); 2157 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 2158 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 2159 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6); 2160 Tmp4 = DAG.getNode(ISD::OR, dl, 
VT, Tmp4, Tmp2); 2161 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4); 2162 } 2163} 2164 2165/// ExpandBitCount - Expand the specified bitcount instruction into operations. 2166/// 2167SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op, 2168 DebugLoc dl) { 2169 switch (Opc) { 2170 default: llvm_unreachable("Cannot expand this yet!"); 2171 case ISD::CTPOP: { 2172 static const uint64_t mask[6] = { 2173 0x5555555555555555ULL, 0x3333333333333333ULL, 2174 0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL, 2175 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL 2176 }; 2177 EVT VT = Op.getValueType(); 2178 EVT ShVT = TLI.getShiftAmountTy(); 2179 unsigned len = VT.getSizeInBits(); 2180 for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { 2181 //x = (x & mask[i][len/8]) + (x >> (1 << i) & mask[i][len/8]) 2182 unsigned EltSize = VT.isVector() ? 2183 VT.getVectorElementType().getSizeInBits() : len; 2184 SDValue Tmp2 = DAG.getConstant(APInt(EltSize, mask[i]), VT); 2185 SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT); 2186 Op = DAG.getNode(ISD::ADD, dl, VT, 2187 DAG.getNode(ISD::AND, dl, VT, Op, Tmp2), 2188 DAG.getNode(ISD::AND, dl, VT, 2189 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3), 2190 Tmp2)); 2191 } 2192 return Op; 2193 } 2194 case ISD::CTLZ: { 2195 // for now, we do this: 2196 // x = x | (x >> 1); 2197 // x = x | (x >> 2); 2198 // ... 2199 // x = x | (x >>16); 2200 // x = x | (x >>32); // for 64-bit input 2201 // return popcount(~x); 2202 // 2203 // but see also: http://www.hackersdelight.org/HDcode/nlz.cc 2204 EVT VT = Op.getValueType(); 2205 EVT ShVT = TLI.getShiftAmountTy(); 2206 unsigned len = VT.getSizeInBits(); 2207 for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { 2208 SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT); 2209 Op = DAG.getNode(ISD::OR, dl, VT, Op, 2210 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3)); 2211 } 2212 Op = DAG.getNOT(dl, Op, VT); 2213 return DAG.getNode(ISD::CTPOP, dl, VT, Op); 2214 } 2215 case ISD::CTTZ: { 2216 // for now, we use: { return popcount(~x & (x - 1)); } 2217 // unless the target has ctlz but not ctpop, in which case we use: 2218 // { return 32 - nlz(~x & (x-1)); } 2219 // see also http://www.hackersdelight.org/HDcode/ntz.cc 2220 EVT VT = Op.getValueType(); 2221 SDValue Tmp3 = DAG.getNode(ISD::AND, dl, VT, 2222 DAG.getNOT(dl, Op, VT), 2223 DAG.getNode(ISD::SUB, dl, VT, Op, 2224 DAG.getConstant(1, VT))); 2225 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 
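  // A worked example of the ~x & (x-1) trick, with x = 0b00101000 (i8):
  //   x - 1 = 0b00100111,  ~x = 0b11010111,  ~x & (x - 1) = 0b00000111
  // popcount of that is 3 = cttz(x); with the CTLZ form below it is
  // 8 - ctlz(0b00000111) = 8 - 5 = 3.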
2226 if (!TLI.isOperationLegalOrCustom(ISD::CTPOP, VT) && 2227 TLI.isOperationLegalOrCustom(ISD::CTLZ, VT)) 2228 return DAG.getNode(ISD::SUB, dl, VT, 2229 DAG.getConstant(VT.getSizeInBits(), VT), 2230 DAG.getNode(ISD::CTLZ, dl, VT, Tmp3)); 2231 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp3); 2232 } 2233 } 2234} 2235 2236void SelectionDAGLegalize::ExpandNode(SDNode *Node, 2237 SmallVectorImpl<SDValue> &Results) { 2238 DebugLoc dl = Node->getDebugLoc(); 2239 SDValue Tmp1, Tmp2, Tmp3, Tmp4; 2240 switch (Node->getOpcode()) { 2241 case ISD::CTPOP: 2242 case ISD::CTLZ: 2243 case ISD::CTTZ: 2244 Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl); 2245 Results.push_back(Tmp1); 2246 break; 2247 case ISD::BSWAP: 2248 Results.push_back(ExpandBSWAP(Node->getOperand(0), dl)); 2249 break; 2250 case ISD::FRAMEADDR: 2251 case ISD::RETURNADDR: 2252 case ISD::FRAME_TO_ARGS_OFFSET: 2253 Results.push_back(DAG.getConstant(0, Node->getValueType(0))); 2254 break; 2255 case ISD::FLT_ROUNDS_: 2256 Results.push_back(DAG.getConstant(1, Node->getValueType(0))); 2257 break; 2258 case ISD::EH_RETURN: 2259 case ISD::DBG_LABEL: 2260 case ISD::EH_LABEL: 2261 case ISD::PREFETCH: 2262 case ISD::MEMBARRIER: 2263 case ISD::VAEND: 2264 Results.push_back(Node->getOperand(0)); 2265 break; 2266 case ISD::DBG_STOPPOINT: 2267 Results.push_back(ExpandDBG_STOPPOINT(Node)); 2268 break; 2269 case ISD::DYNAMIC_STACKALLOC: 2270 ExpandDYNAMIC_STACKALLOC(Node, Results); 2271 break; 2272 case ISD::MERGE_VALUES: 2273 for (unsigned i = 0; i < Node->getNumValues(); i++) 2274 Results.push_back(Node->getOperand(i)); 2275 break; 2276 case ISD::UNDEF: { 2277 EVT VT = Node->getValueType(0); 2278 if (VT.isInteger()) 2279 Results.push_back(DAG.getConstant(0, VT)); 2280 else if (VT.isFloatingPoint()) 2281 Results.push_back(DAG.getConstantFP(0, VT)); 2282 else 2283 llvm_unreachable("Unknown value type!"); 2284 break; 2285 } 2286 case ISD::TRAP: { 2287 // If this operation is not supported, lower it to 'abort()' call 2288 TargetLowering::ArgListTy Args; 2289 std::pair<SDValue, SDValue> CallResult = 2290 TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()), 2291 false, false, false, false, 0, CallingConv::C, false, 2292 /*isReturnValueUsed=*/true, 2293 DAG.getExternalSymbol("abort", TLI.getPointerTy()), 2294 Args, DAG, dl); 2295 Results.push_back(CallResult.second); 2296 break; 2297 } 2298 case ISD::FP_ROUND: 2299 case ISD::BIT_CONVERT: 2300 Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0), 2301 Node->getValueType(0), dl); 2302 Results.push_back(Tmp1); 2303 break; 2304 case ISD::FP_EXTEND: 2305 Tmp1 = EmitStackConvert(Node->getOperand(0), 2306 Node->getOperand(0).getValueType(), 2307 Node->getValueType(0), dl); 2308 Results.push_back(Tmp1); 2309 break; 2310 case ISD::SIGN_EXTEND_INREG: { 2311 // NOTE: we could fall back on load/store here too for targets without 2312 // SAR. However, it is doubtful that any exist. 
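  // For example, sign_extend_inreg of an i32 value from i8: BitsDiff below is
  // 32 - 8 = 24, so the value is shifted left by 24 and then arithmetically
  // shifted right by 24, replicating bit 7 across the upper 24 bits.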
2313 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 2314 unsigned BitsDiff = Node->getValueType(0).getSizeInBits() - 2315 ExtraVT.getSizeInBits(); 2316 SDValue ShiftCst = DAG.getConstant(BitsDiff, TLI.getShiftAmountTy()); 2317 Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0), 2318 Node->getOperand(0), ShiftCst); 2319 Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst); 2320 Results.push_back(Tmp1); 2321 break; 2322 } 2323 case ISD::FP_ROUND_INREG: { 2324 // The only way we can lower this is to turn it into a TRUNCSTORE, 2325 // EXTLOAD pair, targetting a temporary location (a stack slot). 2326 2327 // NOTE: there is a choice here between constantly creating new stack 2328 // slots and always reusing the same one. We currently always create 2329 // new ones, as reuse may inhibit scheduling. 2330 EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); 2331 Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT, 2332 Node->getValueType(0), dl); 2333 Results.push_back(Tmp1); 2334 break; 2335 } 2336 case ISD::SINT_TO_FP: 2337 case ISD::UINT_TO_FP: 2338 Tmp1 = ExpandLegalINT_TO_FP(Node->getOpcode() == ISD::SINT_TO_FP, 2339 Node->getOperand(0), Node->getValueType(0), dl); 2340 Results.push_back(Tmp1); 2341 break; 2342 case ISD::FP_TO_UINT: { 2343 SDValue True, False; 2344 EVT VT = Node->getOperand(0).getValueType(); 2345 EVT NVT = Node->getValueType(0); 2346 const uint64_t zero[] = {0, 0}; 2347 APFloat apf = APFloat(APInt(VT.getSizeInBits(), 2, zero)); 2348 APInt x = APInt::getSignBit(NVT.getSizeInBits()); 2349 (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven); 2350 Tmp1 = DAG.getConstantFP(apf, VT); 2351 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), 2352 Node->getOperand(0), 2353 Tmp1, ISD::SETLT); 2354 True = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, Node->getOperand(0)); 2355 False = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, 2356 DAG.getNode(ISD::FSUB, dl, VT, 2357 Node->getOperand(0), Tmp1)); 2358 False = DAG.getNode(ISD::XOR, dl, NVT, False, 2359 DAG.getConstant(x, NVT)); 2360 Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, True, False); 2361 Results.push_back(Tmp1); 2362 break; 2363 } 2364 case ISD::VAARG: { 2365 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2366 EVT VT = Node->getValueType(0); 2367 Tmp1 = Node->getOperand(0); 2368 Tmp2 = Node->getOperand(1); 2369 SDValue VAList = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, V, 0); 2370 // Increment the pointer, VAList, to the next vaarg 2371 Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList, 2372 DAG.getConstant(TLI.getTargetData()-> 2373 getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())), 2374 TLI.getPointerTy())); 2375 // Store the incremented VAList to the legalized pointer 2376 Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Tmp2, V, 0); 2377 // Load the actual argument out of the pointer VAList 2378 Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, NULL, 0)); 2379 Results.push_back(Results[0].getValue(1)); 2380 break; 2381 } 2382 case ISD::VACOPY: { 2383 // This defaults to loading a pointer from the input and storing it to the 2384 // output, returning the chain. 
2385 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); 2386 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); 2387 Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0), 2388 Node->getOperand(2), VS, 0); 2389 Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), VD, 0); 2390 Results.push_back(Tmp1); 2391 break; 2392 } 2393 case ISD::EXTRACT_VECTOR_ELT: 2394 if (Node->getOperand(0).getValueType().getVectorNumElements() == 1) 2395 // This must be an access of the only element. Return it. 2396 Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, Node->getValueType(0), 2397 Node->getOperand(0)); 2398 else 2399 Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0)); 2400 Results.push_back(Tmp1); 2401 break; 2402 case ISD::EXTRACT_SUBVECTOR: 2403 Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0))); 2404 break; 2405 case ISD::CONCAT_VECTORS: { 2406 Results.push_back(ExpandVectorBuildThroughStack(Node)); 2407 break; 2408 } 2409 case ISD::SCALAR_TO_VECTOR: 2410 Results.push_back(ExpandSCALAR_TO_VECTOR(Node)); 2411 break; 2412 case ISD::INSERT_VECTOR_ELT: 2413 Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0), 2414 Node->getOperand(1), 2415 Node->getOperand(2), dl)); 2416 break; 2417 case ISD::VECTOR_SHUFFLE: { 2418 SmallVector<int, 8> Mask; 2419 cast<ShuffleVectorSDNode>(Node)->getMask(Mask); 2420 2421 EVT VT = Node->getValueType(0); 2422 EVT EltVT = VT.getVectorElementType(); 2423 unsigned NumElems = VT.getVectorNumElements(); 2424 SmallVector<SDValue, 8> Ops; 2425 for (unsigned i = 0; i != NumElems; ++i) { 2426 if (Mask[i] < 0) { 2427 Ops.push_back(DAG.getUNDEF(EltVT)); 2428 continue; 2429 } 2430 unsigned Idx = Mask[i]; 2431 if (Idx < NumElems) 2432 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 2433 Node->getOperand(0), 2434 DAG.getIntPtrConstant(Idx))); 2435 else 2436 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 2437 Node->getOperand(1), 2438 DAG.getIntPtrConstant(Idx - NumElems))); 2439 } 2440 Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size()); 2441 Results.push_back(Tmp1); 2442 break; 2443 } 2444 case ISD::EXTRACT_ELEMENT: { 2445 EVT OpTy = Node->getOperand(0).getValueType(); 2446 if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) { 2447 // 1 -> Hi 2448 Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0), 2449 DAG.getConstant(OpTy.getSizeInBits()/2, 2450 TLI.getShiftAmountTy())); 2451 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1); 2452 } else { 2453 // 0 -> Lo 2454 Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), 2455 Node->getOperand(0)); 2456 } 2457 Results.push_back(Tmp1); 2458 break; 2459 } 2460 case ISD::STACKSAVE: 2461 // Expand to CopyFromReg if the target set 2462 // StackPointerRegisterToSaveRestore. 2463 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) { 2464 Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP, 2465 Node->getValueType(0))); 2466 Results.push_back(Results[0].getValue(1)); 2467 } else { 2468 Results.push_back(DAG.getUNDEF(Node->getValueType(0))); 2469 Results.push_back(Node->getOperand(0)); 2470 } 2471 break; 2472 case ISD::STACKRESTORE: 2473 // Expand to CopyToReg if the target set 2474 // StackPointerRegisterToSaveRestore. 
2475 if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) { 2476 Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP, 2477 Node->getOperand(1))); 2478 } else { 2479 Results.push_back(Node->getOperand(0)); 2480 } 2481 break; 2482 case ISD::FCOPYSIGN: 2483 Results.push_back(ExpandFCOPYSIGN(Node)); 2484 break; 2485 case ISD::FNEG: 2486 // Expand Y = FNEG(X) -> Y = SUB -0.0, X 2487 Tmp1 = DAG.getConstantFP(-0.0, Node->getValueType(0)); 2488 Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1, 2489 Node->getOperand(0)); 2490 Results.push_back(Tmp1); 2491 break; 2492 case ISD::FABS: { 2493 // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X). 2494 EVT VT = Node->getValueType(0); 2495 Tmp1 = Node->getOperand(0); 2496 Tmp2 = DAG.getConstantFP(0.0, VT); 2497 Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(Tmp1.getValueType()), 2498 Tmp1, Tmp2, ISD::SETUGT); 2499 Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1); 2500 Tmp1 = DAG.getNode(ISD::SELECT, dl, VT, Tmp2, Tmp1, Tmp3); 2501 Results.push_back(Tmp1); 2502 break; 2503 } 2504 case ISD::FSQRT: 2505 Results.push_back(ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64, 2506 RTLIB::SQRT_F80, RTLIB::SQRT_PPCF128)); 2507 break; 2508 case ISD::FSIN: 2509 Results.push_back(ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64, 2510 RTLIB::SIN_F80, RTLIB::SIN_PPCF128)); 2511 break; 2512 case ISD::FCOS: 2513 Results.push_back(ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64, 2514 RTLIB::COS_F80, RTLIB::COS_PPCF128)); 2515 break; 2516 case ISD::FLOG: 2517 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64, 2518 RTLIB::LOG_F80, RTLIB::LOG_PPCF128)); 2519 break; 2520 case ISD::FLOG2: 2521 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64, 2522 RTLIB::LOG2_F80, RTLIB::LOG2_PPCF128)); 2523 break; 2524 case ISD::FLOG10: 2525 Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64, 2526 RTLIB::LOG10_F80, RTLIB::LOG10_PPCF128)); 2527 break; 2528 case ISD::FEXP: 2529 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64, 2530 RTLIB::EXP_F80, RTLIB::EXP_PPCF128)); 2531 break; 2532 case ISD::FEXP2: 2533 Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64, 2534 RTLIB::EXP2_F80, RTLIB::EXP2_PPCF128)); 2535 break; 2536 case ISD::FTRUNC: 2537 Results.push_back(ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64, 2538 RTLIB::TRUNC_F80, RTLIB::TRUNC_PPCF128)); 2539 break; 2540 case ISD::FFLOOR: 2541 Results.push_back(ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64, 2542 RTLIB::FLOOR_F80, RTLIB::FLOOR_PPCF128)); 2543 break; 2544 case ISD::FCEIL: 2545 Results.push_back(ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64, 2546 RTLIB::CEIL_F80, RTLIB::CEIL_PPCF128)); 2547 break; 2548 case ISD::FRINT: 2549 Results.push_back(ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64, 2550 RTLIB::RINT_F80, RTLIB::RINT_PPCF128)); 2551 break; 2552 case ISD::FNEARBYINT: 2553 Results.push_back(ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32, 2554 RTLIB::NEARBYINT_F64, 2555 RTLIB::NEARBYINT_F80, 2556 RTLIB::NEARBYINT_PPCF128)); 2557 break; 2558 case ISD::FPOWI: 2559 Results.push_back(ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64, 2560 RTLIB::POWI_F80, RTLIB::POWI_PPCF128)); 2561 break; 2562 case ISD::FPOW: 2563 Results.push_back(ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64, 2564 RTLIB::POW_F80, RTLIB::POW_PPCF128)); 2565 break; 2566 case ISD::FDIV: 2567 Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64, 2568 
RTLIB::DIV_F80, RTLIB::DIV_PPCF128)); 2569 break; 2570 case ISD::FREM: 2571 Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64, 2572 RTLIB::REM_F80, RTLIB::REM_PPCF128)); 2573 break; 2574 case ISD::ConstantFP: { 2575 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node); 2576 // Check to see if this FP immediate is already legal. 2577 bool isLegal = false; 2578 for (TargetLowering::legal_fpimm_iterator I = TLI.legal_fpimm_begin(), 2579 E = TLI.legal_fpimm_end(); I != E; ++I) { 2580 if (CFP->isExactlyValue(*I)) { 2581 isLegal = true; 2582 break; 2583 } 2584 } 2585 // If this is a legal constant, turn it into a TargetConstantFP node. 2586 if (isLegal) 2587 Results.push_back(SDValue(Node, 0)); 2588 else 2589 Results.push_back(ExpandConstantFP(CFP, true, DAG, TLI)); 2590 break; 2591 } 2592 case ISD::EHSELECTION: { 2593 unsigned Reg = TLI.getExceptionSelectorRegister(); 2594 assert(Reg && "Can't expand to unknown register!"); 2595 Results.push_back(DAG.getCopyFromReg(Node->getOperand(1), dl, Reg, 2596 Node->getValueType(0))); 2597 Results.push_back(Results[0].getValue(1)); 2598 break; 2599 } 2600 case ISD::EXCEPTIONADDR: { 2601 unsigned Reg = TLI.getExceptionAddressRegister(); 2602 assert(Reg && "Can't expand to unknown register!"); 2603 Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, Reg, 2604 Node->getValueType(0))); 2605 Results.push_back(Results[0].getValue(1)); 2606 break; 2607 } 2608 case ISD::SUB: { 2609 EVT VT = Node->getValueType(0); 2610 assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) && 2611 TLI.isOperationLegalOrCustom(ISD::XOR, VT) && 2612 "Don't know how to expand this subtraction!"); 2613 Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1), 2614 DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT)); 2615 Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, VT)); 2616 Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1)); 2617 break; 2618 } 2619 case ISD::UREM: 2620 case ISD::SREM: { 2621 EVT VT = Node->getValueType(0); 2622 SDVTList VTs = DAG.getVTList(VT, VT); 2623 bool isSigned = Node->getOpcode() == ISD::SREM; 2624 unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV; 2625 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM; 2626 Tmp2 = Node->getOperand(0); 2627 Tmp3 = Node->getOperand(1); 2628 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT)) { 2629 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1); 2630 } else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) { 2631 // X % Y -> X-X/Y*Y 2632 Tmp1 = DAG.getNode(DivOpc, dl, VT, Tmp2, Tmp3); 2633 Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Tmp3); 2634 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp1); 2635 } else if (isSigned) { 2636 Tmp1 = ExpandIntLibCall(Node, true, RTLIB::SREM_I16, RTLIB::SREM_I32, 2637 RTLIB::SREM_I64, RTLIB::SREM_I128); 2638 } else { 2639 Tmp1 = ExpandIntLibCall(Node, false, RTLIB::UREM_I16, RTLIB::UREM_I32, 2640 RTLIB::UREM_I64, RTLIB::UREM_I128); 2641 } 2642 Results.push_back(Tmp1); 2643 break; 2644 } 2645 case ISD::UDIV: 2646 case ISD::SDIV: { 2647 bool isSigned = Node->getOpcode() == ISD::SDIV; 2648 unsigned DivRemOpc = isSigned ?
ISD::SDIVREM : ISD::UDIVREM; 2649 EVT VT = Node->getValueType(0); 2650 SDVTList VTs = DAG.getVTList(VT, VT); 2651 if (TLI.isOperationLegalOrCustom(DivRemOpc, VT)) 2652 Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0), 2653 Node->getOperand(1)); 2654 else if (isSigned) 2655 Tmp1 = ExpandIntLibCall(Node, true, RTLIB::SDIV_I16, RTLIB::SDIV_I32, 2656 RTLIB::SDIV_I64, RTLIB::SDIV_I128); 2657 else 2658 Tmp1 = ExpandIntLibCall(Node, false, RTLIB::UDIV_I16, RTLIB::UDIV_I32, 2659 RTLIB::UDIV_I64, RTLIB::UDIV_I128); 2660 Results.push_back(Tmp1); 2661 break; 2662 } 2663 case ISD::MULHU: 2664 case ISD::MULHS: { 2665 unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI : 2666 ISD::SMUL_LOHI; 2667 EVT VT = Node->getValueType(0); 2668 SDVTList VTs = DAG.getVTList(VT, VT); 2669 assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) && 2670 "If this wasn't legal, it shouldn't have been created!"); 2671 Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0), 2672 Node->getOperand(1)); 2673 Results.push_back(Tmp1.getValue(1)); 2674 break; 2675 } 2676 case ISD::MUL: { 2677 EVT VT = Node->getValueType(0); 2678 SDVTList VTs = DAG.getVTList(VT, VT); 2679 // See if multiply or divide can be lowered using two-result operations. 2680 // We just need the low half of the multiply; try both the signed 2681 // and unsigned forms. If the target supports both SMUL_LOHI and 2682 // UMUL_LOHI, form a preference by checking which forms of plain 2683 // MULH it supports. 2684 bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT); 2685 bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT); 2686 bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT); 2687 bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT); 2688 unsigned OpToUse = 0; 2689 if (HasSMUL_LOHI && !HasMULHS) { 2690 OpToUse = ISD::SMUL_LOHI; 2691 } else if (HasUMUL_LOHI && !HasMULHU) { 2692 OpToUse = ISD::UMUL_LOHI; 2693 } else if (HasSMUL_LOHI) { 2694 OpToUse = ISD::SMUL_LOHI; 2695 } else if (HasUMUL_LOHI) { 2696 OpToUse = ISD::UMUL_LOHI; 2697 } 2698 if (OpToUse) { 2699 Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0), 2700 Node->getOperand(1))); 2701 break; 2702 } 2703 Tmp1 = ExpandIntLibCall(Node, false, RTLIB::MUL_I16, RTLIB::MUL_I32, 2704 RTLIB::MUL_I64, RTLIB::MUL_I128); 2705 Results.push_back(Tmp1); 2706 break; 2707 } 2708 case ISD::SADDO: 2709 case ISD::SSUBO: { 2710 SDValue LHS = Node->getOperand(0); 2711 SDValue RHS = Node->getOperand(1); 2712 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ? 2713 ISD::ADD : ISD::SUB, dl, LHS.getValueType(), 2714 LHS, RHS); 2715 Results.push_back(Sum); 2716 EVT OType = Node->getValueType(1); 2717 2718 SDValue Zero = DAG.getConstant(0, LHS.getValueType()); 2719 2720 // LHSSign -> LHS >= 0 2721 // RHSSign -> RHS >= 0 2722 // SumSign -> Sum >= 0 2723 // 2724 // Add: 2725 // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign) 2726 // Sub: 2727 // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign) 2728 // 2729 SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE); 2730 SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE); 2731 SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign, 2732 Node->getOpcode() == ISD::SADDO ? 
ISD::SETEQ : ISD::SETNE); 2734 2735 SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE); 2736 SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE); 2737 2738 SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE); 2739 Results.push_back(Cmp); 2740 break; 2741 } 2742 case ISD::UADDO: 2743 case ISD::USUBO: { 2744 SDValue LHS = Node->getOperand(0); 2745 SDValue RHS = Node->getOperand(1); 2746 SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::UADDO ? 2747 ISD::ADD : ISD::SUB, dl, LHS.getValueType(), 2748 LHS, RHS); 2749 Results.push_back(Sum); 2750 Results.push_back(DAG.getSetCC(dl, Node->getValueType(1), Sum, LHS, 2751 Node->getOpcode() == ISD::UADDO ? 2752 ISD::SETULT : ISD::SETUGT)); 2753 break; 2754 } 2755 case ISD::UMULO: 2756 case ISD::SMULO: { 2757 EVT VT = Node->getValueType(0); 2758 SDValue LHS = Node->getOperand(0); 2759 SDValue RHS = Node->getOperand(1); 2760 SDValue BottomHalf; 2761 SDValue TopHalf; 2762 static unsigned Ops[2][3] = 2763 { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND }, 2764 { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }}; 2765 bool isSigned = Node->getOpcode() == ISD::SMULO; 2766 if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) { 2767 BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 2768 TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS); 2769 } else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) { 2770 BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS, 2771 RHS); 2772 TopHalf = BottomHalf.getValue(1); 2773 } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2))) { 2774 EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2); 2775 LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS); 2776 RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS); 2777 Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS); 2778 BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1, 2779 DAG.getIntPtrConstant(0)); 2780 TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1, 2781 DAG.getIntPtrConstant(1)); 2782 } else { 2783 // FIXME: We should be able to fall back to a libcall with an illegal 2784 // type in some cases. 2785 // Also, we can fall back to a division in some cases, but that's a big 2786 // performance hit in the general case.
2787 llvm_unreachable("Don't know how to expand this operation yet!"); 2788 } 2789 if (isSigned) { 2790 Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, TLI.getShiftAmountTy()); 2791 Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1); 2792 TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1, 2793 ISD::SETNE); 2794 } else { 2795 TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, 2796 DAG.getConstant(0, VT), ISD::SETNE); 2797 } 2798 Results.push_back(BottomHalf); 2799 Results.push_back(TopHalf); 2800 break; 2801 } 2802 case ISD::BUILD_PAIR: { 2803 EVT PairTy = Node->getValueType(0); 2804 Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0)); 2805 Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1)); 2806 Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2, 2807 DAG.getConstant(PairTy.getSizeInBits()/2, 2808 TLI.getShiftAmountTy())); 2809 Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2)); 2810 break; 2811 } 2812 case ISD::SELECT: 2813 Tmp1 = Node->getOperand(0); 2814 Tmp2 = Node->getOperand(1); 2815 Tmp3 = Node->getOperand(2); 2816 if (Tmp1.getOpcode() == ISD::SETCC) { 2817 Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1), 2818 Tmp2, Tmp3, 2819 cast<CondCodeSDNode>(Tmp1.getOperand(2))->get()); 2820 } else { 2821 Tmp1 = DAG.getSelectCC(dl, Tmp1, 2822 DAG.getConstant(0, Tmp1.getValueType()), 2823 Tmp2, Tmp3, ISD::SETNE); 2824 } 2825 Results.push_back(Tmp1); 2826 break; 2827 case ISD::BR_JT: { 2828 SDValue Chain = Node->getOperand(0); 2829 SDValue Table = Node->getOperand(1); 2830 SDValue Index = Node->getOperand(2); 2831 2832 EVT PTy = TLI.getPointerTy(); 2833 MachineFunction &MF = DAG.getMachineFunction(); 2834 unsigned EntrySize = MF.getJumpTableInfo()->getEntrySize(); 2835 Index= DAG.getNode(ISD::MUL, dl, PTy, 2836 Index, DAG.getConstant(EntrySize, PTy)); 2837 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 2838 2839 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8); 2840 SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr, 2841 PseudoSourceValue::getJumpTable(), 0, MemVT); 2842 Addr = LD; 2843 if (TLI.getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2844 // For PIC, the sequence is: 2845 // BRIND(load(Jumptable + index) + RelocBase) 2846 // RelocBase can be JumpTable, GOT or some sort of global base. 2847 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, 2848 TLI.getPICJumpTableRelocBase(Table, DAG)); 2849 } 2850 Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr); 2851 Results.push_back(Tmp1); 2852 break; 2853 } 2854 case ISD::BRCOND: 2855 // Expand brcond's setcc into its constituent parts and create a BR_CC 2856 // Node. 
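  // For example:
  //   brcond (setcc a, b, setlt), dest  ->  br_cc setlt, a, b, dest
  //   brcond cond, dest                 ->  br_cc setne, cond, 0, dest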
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    if (Tmp2.getOpcode() == ISD::SETCC) {
      Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other,
                         Tmp1, Tmp2.getOperand(2),
                         Tmp2.getOperand(0), Tmp2.getOperand(1),
                         Node->getOperand(2));
    } else {
      Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1,
                         DAG.getCondCode(ISD::SETNE), Tmp2,
                         DAG.getConstant(0, Tmp2.getValueType()),
                         Node->getOperand(2));
    }
    Results.push_back(Tmp1);
    break;
  case ISD::SETCC: {
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    Tmp3 = Node->getOperand(2);
    LegalizeSetCCCondCode(Node->getValueType(0), Tmp1, Tmp2, Tmp3, dl);

    // If we expanded the SETCC into an AND/OR, return the new node
    if (Tmp2.getNode() == 0) {
      Results.push_back(Tmp1);
      break;
    }

    // Otherwise, SETCC for the given comparison type must be completely
    // illegal; expand it into a SELECT_CC.
    EVT VT = Node->getValueType(0);
    Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2,
                       DAG.getConstant(1, VT), DAG.getConstant(0, VT), Tmp3);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SELECT_CC: {
    Tmp1 = Node->getOperand(0);   // LHS
    Tmp2 = Node->getOperand(1);   // RHS
    Tmp3 = Node->getOperand(2);   // True
    Tmp4 = Node->getOperand(3);   // False
    SDValue CC = Node->getOperand(4);

    LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp1.getValueType()),
                          Tmp1, Tmp2, CC, dl);

    assert(!Tmp2.getNode() && "Can't legalize SELECT_CC with legal condition!");
    Tmp2 = DAG.getConstant(0, Tmp1.getValueType());
    CC = DAG.getCondCode(ISD::SETNE);
    Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, Tmp2,
                       Tmp3, Tmp4, CC);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::BR_CC: {
    Tmp1 = Node->getOperand(0);   // Chain
    Tmp2 = Node->getOperand(2);   // LHS
    Tmp3 = Node->getOperand(3);   // RHS
    Tmp4 = Node->getOperand(1);   // CC

    LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp2.getValueType()),
                          Tmp2, Tmp3, Tmp4, dl);
    LastCALLSEQ_END = DAG.getEntryNode();

    assert(!Tmp3.getNode() && "Can't legalize BR_CC with legal condition!");
    Tmp3 = DAG.getConstant(0, Tmp2.getValueType());
    Tmp4 = DAG.getCondCode(ISD::SETNE);
    Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4, Tmp2,
                       Tmp3, Node->getOperand(4));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::GLOBAL_OFFSET_TABLE:
  case ISD::GlobalAddress:
  case ISD::GlobalTLSAddress:
  case ISD::ExternalSymbol:
  case ISD::ConstantPool:
  case ISD::JumpTable:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID:
    // FIXME: Custom lowering for these operations shouldn't return null!
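    // Reaching this point means custom lowering for one of the node kinds
    // above produced a null SDValue; hand the node's original result values
    // back unchanged so legalization treats it as handled (see FIXME above).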
    for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
      Results.push_back(SDValue(Node, i));
    break;
  }
}

void SelectionDAGLegalize::PromoteNode(SDNode *Node,
                                       SmallVectorImpl<SDValue> &Results) {
  EVT OVT = Node->getValueType(0);
  if (Node->getOpcode() == ISD::UINT_TO_FP ||
      Node->getOpcode() == ISD::SINT_TO_FP ||
      Node->getOpcode() == ISD::SETCC) {
    OVT = Node->getOperand(0).getValueType();
  }
  EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
  DebugLoc dl = Node->getDebugLoc();
  SDValue Tmp1, Tmp2, Tmp3;
  switch (Node->getOpcode()) {
  case ISD::CTTZ:
  case ISD::CTLZ:
  case ISD::CTPOP:
    // Zero extend the argument.
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
    // Perform the larger operation.
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
    if (Node->getOpcode() == ISD::CTTZ) {
      // If Tmp1 == sizeinbits(NVT), then Tmp1 = sizeinbits(Old VT).
      Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT),
                          Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT),
                          ISD::SETEQ);
      Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2,
                         DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1);
    } else if (Node->getOpcode() == ISD::CTLZ) {
      // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
      Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1,
                         DAG.getConstant(NVT.getSizeInBits() -
                                         OVT.getSizeInBits(), NVT));
    }
    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1));
    break;
  case ISD::BSWAP: {
    unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
    Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1);
    Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1,
                       DAG.getConstant(DiffBits, TLI.getShiftAmountTy()));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:
    Tmp1 = PromoteLegalFP_TO_INT(Node->getOperand(0), Node->getValueType(0),
                                 Node->getOpcode() == ISD::FP_TO_SINT, dl);
    Results.push_back(Tmp1);
    break;
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
    Tmp1 = PromoteLegalINT_TO_FP(Node->getOperand(0), Node->getValueType(0),
                                 Node->getOpcode() == ISD::SINT_TO_FP, dl);
    Results.push_back(Tmp1);
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    unsigned ExtOp, TruncOp;
    if (OVT.isVector()) {
      ExtOp   = ISD::BIT_CONVERT;
      TruncOp = ISD::BIT_CONVERT;
    } else if (OVT.isInteger()) {
      ExtOp   = ISD::ANY_EXTEND;
      TruncOp = ISD::TRUNCATE;
    } else {
      llvm_report_error("Cannot promote logic operation");
    }
    // Promote each of the values to the new type.
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    // Perform the larger operation, then convert back.
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
    Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1));
    break;
  }
  case ISD::SELECT: {
    unsigned ExtOp, TruncOp;
    if (Node->getValueType(0).isVector()) {
      ExtOp   = ISD::BIT_CONVERT;
      TruncOp = ISD::BIT_CONVERT;
    } else if (Node->getValueType(0).isInteger()) {
      ExtOp   = ISD::ANY_EXTEND;
      TruncOp = ISD::TRUNCATE;
    } else {
      ExtOp   = ISD::FP_EXTEND;
      TruncOp = ISD::FP_ROUND;
    }
    Tmp1 = Node->getOperand(0);
    // Promote each of the values to the new type.
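    // Illustrative example (types are hypothetical): promoting a select with
    // f32 operands to NVT = f64 produces
    //   t2 = fp_extend TrueVal,  t3 = fp_extend FalseVal
    //   t1 = select Cond, t2, t3       ; performed at f64
    //   res = fp_round t1              ; rounded back to f32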
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
    // Perform the larger operation, then round down.
    Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp1, Tmp2, Tmp3);
    if (TruncOp != ISD::FP_ROUND)
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1);
    else
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1,
                         DAG.getIntPtrConstant(0));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    SmallVector<int, 8> Mask;
    cast<ShuffleVectorSDNode>(Node)->getMask(Mask);

    // Cast the two input vectors.
    Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(1));

    // Convert the shuffle mask to the right # elements.
    Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
    Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, OVT, Tmp1);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SETCC: {
    unsigned ExtOp = ISD::FP_EXTEND;
    if (NVT.isInteger()) {
      ISD::CondCode CCCode =
        cast<CondCodeSDNode>(Node->getOperand(2))->get();
      ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    }
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
                                  Tmp1, Tmp2, Node->getOperand(2)));
    break;
  }
  }
}

// SelectionDAG::Legalize - This is the entry point for the file.
//
void SelectionDAG::Legalize(bool TypesNeedLegalizing,
                            CodeGenOpt::Level OptLevel) {
  /// run - This is the main entry point to this class.
  ///
  SelectionDAGLegalize(*this, OptLevel).LegalizeDAG();
}
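// Illustrative usage only (the caller-side names below are assumptions, not
// taken from this file): the instruction selector is expected to invoke this
// entry point on its current DAG once types have been dealt with, roughly:
//
//   CurDAG->Legalize(TypesNeedLegalizing, OptLevel);
//
// with CurDAG, TypesNeedLegalizing and OptLevel supplied by the caller.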