// SelectionDAGISel.cpp, revision 61a1f1ceb9f0fd2cb796eba50663c79cbdda63f5
//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Constants.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/ParameterAttributes.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Timer.h"
#include <algorithm>
using namespace llvm;

// Command-line switches controlling optional / experimental isel behavior.
static cl::opt<bool>
EnableValueProp("enable-value-prop", cl::Hidden);
static cl::opt<bool>
EnableLegalizeTypes("enable-legalize-types", cl::Hidden);
static cl::opt<bool>
EnableFastISel("fast-isel", cl::Hidden,
          cl::desc("Enable the experimental \"fast\" instruction selector"));
static cl::opt<bool>
DisableFastISelAbort("fast-isel-no-abort", cl::Hidden,
          cl::desc("Use the SelectionDAGISel when \"fast\" instruction "
                   "selection fails"));

// DAG-visualization switches exist only in debug builds; in release builds
// they are compile-time-false constants so the viewing code folds away.
#ifndef NDEBUG
static cl::opt<bool>
ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden,
          cl::desc("Pop up a window to show dags before the first "
                   "dag combine pass"));
static cl::opt<bool>
ViewLegalizeTypesDAGs("view-legalize-types-dags", cl::Hidden,
          cl::desc("Pop up a window to show dags before legalize types"));
static cl::opt<bool>
ViewLegalizeDAGs("view-legalize-dags", cl::Hidden,
          cl::desc("Pop up a window to show dags before legalize"));
static cl::opt<bool>
ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden,
          cl::desc("Pop up a window to show dags before the second "
                   "dag combine pass"));
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
          cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
          cl::desc("Pop up a window to show sched dags as they are processed"));
static cl::opt<bool>
ViewSUnitDAGs("view-sunit-dags", cl::Hidden,
      cl::desc("Pop up a window to show SUnit dags after they are processed"));
#else
static const bool ViewDAGCombine1 = false,
                  ViewLegalizeTypesDAGs = false, ViewLegalizeDAGs = false,
                  ViewDAGCombine2 = false,
                  ViewISelDAGs = false, ViewSchedDAGs = false,
                  ViewSUnitDAGs = false;
#endif

//===---------------------------------------------------------------------===//
///
/// RegisterScheduler class - Track the registration of instruction schedulers.
101/// 102//===---------------------------------------------------------------------===// 103MachinePassRegistry RegisterScheduler::Registry; 104 105//===---------------------------------------------------------------------===// 106/// 107/// ISHeuristic command line option for instruction schedulers. 108/// 109//===---------------------------------------------------------------------===// 110static cl::opt<RegisterScheduler::FunctionPassCtor, false, 111 RegisterPassParser<RegisterScheduler> > 112ISHeuristic("pre-RA-sched", 113 cl::init(&createDefaultScheduler), 114 cl::desc("Instruction schedulers available (before register" 115 " allocation):")); 116 117static RegisterScheduler 118defaultListDAGScheduler("default", " Best scheduler for the target", 119 createDefaultScheduler); 120 121namespace { struct SDISelAsmOperandInfo; } 122 123/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence 124/// insertvalue or extractvalue indices that identify a member, return 125/// the linearized index of the start of the member. 126/// 127static unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty, 128 const unsigned *Indices, 129 const unsigned *IndicesEnd, 130 unsigned CurIndex = 0) { 131 // Base case: We're done. 132 if (Indices && Indices == IndicesEnd) 133 return CurIndex; 134 135 // Given a struct type, recursively traverse the elements. 136 if (const StructType *STy = dyn_cast<StructType>(Ty)) { 137 for (StructType::element_iterator EB = STy->element_begin(), 138 EI = EB, 139 EE = STy->element_end(); 140 EI != EE; ++EI) { 141 if (Indices && *Indices == unsigned(EI - EB)) 142 return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex); 143 CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex); 144 } 145 } 146 // Given an array type, recursively traverse the elements. 
147 else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 148 const Type *EltTy = ATy->getElementType(); 149 for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) { 150 if (Indices && *Indices == i) 151 return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex); 152 CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex); 153 } 154 } 155 // We haven't found the type we're looking for, so keep searching. 156 return CurIndex + 1; 157} 158 159/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of 160/// MVTs that represent all the individual underlying 161/// non-aggregate types that comprise it. 162/// 163/// If Offsets is non-null, it points to a vector to be filled in 164/// with the in-memory offsets of each of the individual values. 165/// 166static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty, 167 SmallVectorImpl<MVT> &ValueVTs, 168 SmallVectorImpl<uint64_t> *Offsets = 0, 169 uint64_t StartingOffset = 0) { 170 // Given a struct type, recursively traverse the elements. 171 if (const StructType *STy = dyn_cast<StructType>(Ty)) { 172 const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy); 173 for (StructType::element_iterator EB = STy->element_begin(), 174 EI = EB, 175 EE = STy->element_end(); 176 EI != EE; ++EI) 177 ComputeValueVTs(TLI, *EI, ValueVTs, Offsets, 178 StartingOffset + SL->getElementOffset(EI - EB)); 179 return; 180 } 181 // Given an array type, recursively traverse the elements. 182 if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 183 const Type *EltTy = ATy->getElementType(); 184 uint64_t EltSize = TLI.getTargetData()->getABITypeSize(EltTy); 185 for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) 186 ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets, 187 StartingOffset + i * EltSize); 188 return; 189 } 190 // Base case: we can get an MVT for this LLVM IR type. 
191 ValueVTs.push_back(TLI.getValueType(Ty)); 192 if (Offsets) 193 Offsets->push_back(StartingOffset); 194} 195 196namespace { 197 /// RegsForValue - This struct represents the registers (physical or virtual) 198 /// that a particular set of values is assigned, and the type information about 199 /// the value. The most common situation is to represent one value at a time, 200 /// but struct or array values are handled element-wise as multiple values. 201 /// The splitting of aggregates is performed recursively, so that we never 202 /// have aggregate-typed registers. The values at this point do not necessarily 203 /// have legal types, so each value may require one or more registers of some 204 /// legal type. 205 /// 206 struct VISIBILITY_HIDDEN RegsForValue { 207 /// TLI - The TargetLowering object. 208 /// 209 const TargetLowering *TLI; 210 211 /// ValueVTs - The value types of the values, which may not be legal, and 212 /// may need be promoted or synthesized from one or more registers. 213 /// 214 SmallVector<MVT, 4> ValueVTs; 215 216 /// RegVTs - The value types of the registers. This is the same size as 217 /// ValueVTs and it records, for each value, what the type of the assigned 218 /// register or registers are. (Individual values are never synthesized 219 /// from more than one type of register.) 220 /// 221 /// With virtual registers, the contents of RegVTs is redundant with TLI's 222 /// getRegisterType member function, however when with physical registers 223 /// it is necessary to have a separate record of the types. 224 /// 225 SmallVector<MVT, 4> RegVTs; 226 227 /// Regs - This list holds the registers assigned to the values. 228 /// Each legal or promoted value requires one register, and each 229 /// expanded value requires multiple registers. 
230 /// 231 SmallVector<unsigned, 4> Regs; 232 233 RegsForValue() : TLI(0) {} 234 235 RegsForValue(const TargetLowering &tli, 236 const SmallVector<unsigned, 4> ®s, 237 MVT regvt, MVT valuevt) 238 : TLI(&tli), ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {} 239 RegsForValue(const TargetLowering &tli, 240 const SmallVector<unsigned, 4> ®s, 241 const SmallVector<MVT, 4> ®vts, 242 const SmallVector<MVT, 4> &valuevts) 243 : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {} 244 RegsForValue(const TargetLowering &tli, 245 unsigned Reg, const Type *Ty) : TLI(&tli) { 246 ComputeValueVTs(tli, Ty, ValueVTs); 247 248 for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) { 249 MVT ValueVT = ValueVTs[Value]; 250 unsigned NumRegs = TLI->getNumRegisters(ValueVT); 251 MVT RegisterVT = TLI->getRegisterType(ValueVT); 252 for (unsigned i = 0; i != NumRegs; ++i) 253 Regs.push_back(Reg + i); 254 RegVTs.push_back(RegisterVT); 255 Reg += NumRegs; 256 } 257 } 258 259 /// append - Add the specified values to this one. 260 void append(const RegsForValue &RHS) { 261 TLI = RHS.TLI; 262 ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end()); 263 RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end()); 264 Regs.append(RHS.Regs.begin(), RHS.Regs.end()); 265 } 266 267 268 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from 269 /// this value and returns the result as a ValueVTs value. This uses 270 /// Chain/Flag as the input and updates them for the output Chain/Flag. 271 /// If the Flag pointer is NULL, no flag is used. 272 SDValue getCopyFromRegs(SelectionDAG &DAG, 273 SDValue &Chain, SDValue *Flag) const; 274 275 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the 276 /// specified value into the registers specified by this object. This uses 277 /// Chain/Flag as the input and updates them for the output Chain/Flag. 278 /// If the Flag pointer is NULL, no flag is used. 
279 void getCopyToRegs(SDValue Val, SelectionDAG &DAG, 280 SDValue &Chain, SDValue *Flag) const; 281 282 /// AddInlineAsmOperands - Add this value to the specified inlineasm node 283 /// operand list. This adds the code marker and includes the number of 284 /// values added into it. 285 void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG, 286 std::vector<SDValue> &Ops) const; 287 }; 288} 289 290namespace llvm { 291 //===--------------------------------------------------------------------===// 292 /// createDefaultScheduler - This creates an instruction scheduler appropriate 293 /// for the target. 294 ScheduleDAG* createDefaultScheduler(SelectionDAGISel *IS, 295 SelectionDAG *DAG, 296 MachineBasicBlock *BB, 297 bool Fast) { 298 TargetLowering &TLI = IS->getTargetLowering(); 299 300 if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency) { 301 return createTDListDAGScheduler(IS, DAG, BB, Fast); 302 } else { 303 assert(TLI.getSchedulingPreference() == 304 TargetLowering::SchedulingForRegPressure && "Unknown sched type!"); 305 return createBURRListDAGScheduler(IS, DAG, BB, Fast); 306 } 307 } 308 309 310 //===--------------------------------------------------------------------===// 311 /// FunctionLoweringInfo - This contains information that is global to a 312 /// function that is used when lowering a region of the function. 313 class FunctionLoweringInfo { 314 public: 315 TargetLowering &TLI; 316 Function *Fn; 317 MachineFunction *MF; 318 MachineRegisterInfo *RegInfo; 319 320 explicit FunctionLoweringInfo(TargetLowering &TLI); 321 322 /// set - Initialize this FunctionLoweringInfo with the given Function 323 /// and its associated MachineFunction. 324 /// 325 void set(Function &Fn, MachineFunction &MF); 326 327 /// MBBMap - A mapping from LLVM basic blocks to their machine code entry. 
328 DenseMap<const BasicBlock*, MachineBasicBlock *> MBBMap; 329 330 /// ValueMap - Since we emit code for the function a basic block at a time, 331 /// we must remember which virtual registers hold the values for 332 /// cross-basic-block values. 333 DenseMap<const Value*, unsigned> ValueMap; 334 335 /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in 336 /// the entry block. This allows the allocas to be efficiently referenced 337 /// anywhere in the function. 338 DenseMap<const AllocaInst*, int> StaticAllocaMap; 339 340#ifndef NDEBUG 341 SmallSet<Instruction*, 8> CatchInfoLost; 342 SmallSet<Instruction*, 8> CatchInfoFound; 343#endif 344 345 unsigned MakeReg(MVT VT) { 346 return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT)); 347 } 348 349 /// isExportedInst - Return true if the specified value is an instruction 350 /// exported from its block. 351 bool isExportedInst(const Value *V) { 352 return ValueMap.count(V); 353 } 354 355 unsigned CreateRegForValue(const Value *V); 356 357 unsigned InitializeRegForValue(const Value *V) { 358 unsigned &R = ValueMap[V]; 359 assert(R == 0 && "Already initialized this value register!"); 360 return R = CreateRegForValue(V); 361 } 362 363 struct LiveOutInfo { 364 unsigned NumSignBits; 365 APInt KnownOne, KnownZero; 366 LiveOutInfo() : NumSignBits(0) {} 367 }; 368 369 /// LiveOutRegInfo - Information about live out vregs, indexed by their 370 /// register number offset by 'FirstVirtualRegister'. 371 std::vector<LiveOutInfo> LiveOutRegInfo; 372 373 /// clear - Clear out all the function-specific state. This returns this 374 /// FunctionLoweringInfo to an empty state, ready to be used for a 375 /// different function. 
376 void clear() { 377 MBBMap.clear(); 378 ValueMap.clear(); 379 StaticAllocaMap.clear(); 380#ifndef NDEBUG 381 CatchInfoLost.clear(); 382 CatchInfoFound.clear(); 383#endif 384 LiveOutRegInfo.clear(); 385 } 386 }; 387} 388 389/// isSelector - Return true if this instruction is a call to the 390/// eh.selector intrinsic. 391static bool isSelector(Instruction *I) { 392 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 393 return (II->getIntrinsicID() == Intrinsic::eh_selector_i32 || 394 II->getIntrinsicID() == Intrinsic::eh_selector_i64); 395 return false; 396} 397 398/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by 399/// PHI nodes or outside of the basic block that defines it, or used by a 400/// switch or atomic instruction, which may expand to multiple basic blocks. 401static bool isUsedOutsideOfDefiningBlock(Instruction *I) { 402 if (isa<PHINode>(I)) return true; 403 BasicBlock *BB = I->getParent(); 404 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI) 405 if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) || 406 // FIXME: Remove switchinst special case. 407 isa<SwitchInst>(*UI)) 408 return true; 409 return false; 410} 411 412/// isOnlyUsedInEntryBlock - If the specified argument is only used in the 413/// entry block, return true. This includes arguments used by switches, since 414/// the switch may expand into multiple basic blocks. 415static bool isOnlyUsedInEntryBlock(Argument *A) { 416 // With FastISel active, we may be splitting blocks, so force creation 417 // of virtual registers for all non-dead arguments. 418 if (EnableFastISel) 419 return A->use_empty(); 420 421 BasicBlock *Entry = A->getParent()->begin(); 422 for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI) 423 if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI)) 424 return false; // Use not in entry block. 
425 return true; 426} 427 428FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli) 429 : TLI(tli) { 430} 431 432void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf) { 433 Fn = &fn; 434 MF = &mf; 435 RegInfo = &MF->getRegInfo(); 436 437 // Create a vreg for each argument register that is not dead and is used 438 // outside of the entry block for the function. 439 for (Function::arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end(); 440 AI != E; ++AI) 441 if (!isOnlyUsedInEntryBlock(AI)) 442 InitializeRegForValue(AI); 443 444 // Initialize the mapping of values to registers. This is only set up for 445 // instruction values that are used outside of the block that defines 446 // them. 447 Function::iterator BB = Fn->begin(), EB = Fn->end(); 448 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) 449 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) 450 if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) { 451 const Type *Ty = AI->getAllocatedType(); 452 uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty); 453 unsigned Align = 454 std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), 455 AI->getAlignment()); 456 457 TySize *= CUI->getZExtValue(); // Get total allocated size. 458 if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects. 459 StaticAllocaMap[AI] = 460 MF->getFrameInfo()->CreateStackObject(TySize, Align); 461 } 462 463 for (; BB != EB; ++BB) 464 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) 465 if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I)) 466 if (!isa<AllocaInst>(I) || 467 !StaticAllocaMap.count(cast<AllocaInst>(I))) 468 InitializeRegForValue(I); 469 470 // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This 471 // also creates the initial PHI MachineInstrs, though none of the input 472 // operands are populated. 
473 for (BB = Fn->begin(), EB = Fn->end(); BB != EB; ++BB) { 474 MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB); 475 MBBMap[BB] = MBB; 476 MF->push_back(MBB); 477 478 // Create Machine PHI nodes for LLVM PHI nodes, lowering them as 479 // appropriate. 480 PHINode *PN; 481 for (BasicBlock::iterator I = BB->begin();(PN = dyn_cast<PHINode>(I)); ++I){ 482 if (PN->use_empty()) continue; 483 484 unsigned PHIReg = ValueMap[PN]; 485 assert(PHIReg && "PHI node does not have an assigned virtual register!"); 486 487 SmallVector<MVT, 4> ValueVTs; 488 ComputeValueVTs(TLI, PN->getType(), ValueVTs); 489 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) { 490 MVT VT = ValueVTs[vti]; 491 unsigned NumRegisters = TLI.getNumRegisters(VT); 492 const TargetInstrInfo *TII = TLI.getTargetMachine().getInstrInfo(); 493 for (unsigned i = 0; i != NumRegisters; ++i) 494 BuildMI(MBB, TII->get(TargetInstrInfo::PHI), PHIReg+i); 495 PHIReg += NumRegisters; 496 } 497 } 498 } 499} 500 501/// CreateRegForValue - Allocate the appropriate number of virtual registers of 502/// the correctly promoted or expanded types. Assign these registers 503/// consecutive vreg numbers and return the first assigned number. 504/// 505/// In the case that the given value has struct or array type, this function 506/// will assign registers for each member or element. 
///
unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
  // Flatten the value's type into its non-aggregate leaves.
  SmallVector<MVT, 4> ValueVTs;
  ComputeValueVTs(TLI, V->getType(), ValueVTs);

  unsigned FirstReg = 0;
  for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    MVT ValueVT = ValueVTs[Value];
    MVT RegisterVT = TLI.getRegisterType(ValueVT);

    unsigned NumRegs = TLI.getNumRegisters(ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      unsigned R = MakeReg(RegisterVT);
      // Remember only the first register; the doc comment above promises
      // consecutive vreg numbers (NOTE(review): this presumes
      // createVirtualRegister hands out consecutive numbers -- confirm
      // against MachineRegisterInfo).
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}

//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
/// Also, targets can overload any lowering method.
///
namespace llvm {
class SelectionDAGLowering {
  MachineBasicBlock *CurMBB;

  // NodeMap - Map from LLVM values to the SDValues computed for them in the
  // current block.
  DenseMap<const Value*, SDValue> NodeMap;

  /// PendingLoads - Loads are not emitted to the program immediately. We bunch
  /// them up and then emit token factor nodes when possible. This allows us to
  /// get simple disambiguation between loads without worrying about alias
  /// analysis.
  SmallVector<SDValue, 8> PendingLoads;

  /// PendingExports - CopyToReg nodes that copy values to virtual registers
  /// for export to other blocks need to be emitted before any terminator
  /// instruction, but they have no other ordering requirements. We bunch them
  /// up and the emit a single tokenfactor for them just before terminator
  /// instructions.
  SmallVector<SDValue, 8> PendingExports;

  /// Case - A struct to record the Value for a switch case, and the
  /// case's target basic block.
552 struct Case { 553 Constant* Low; 554 Constant* High; 555 MachineBasicBlock* BB; 556 557 Case() : Low(0), High(0), BB(0) { } 558 Case(Constant* low, Constant* high, MachineBasicBlock* bb) : 559 Low(low), High(high), BB(bb) { } 560 uint64_t size() const { 561 uint64_t rHigh = cast<ConstantInt>(High)->getSExtValue(); 562 uint64_t rLow = cast<ConstantInt>(Low)->getSExtValue(); 563 return (rHigh - rLow + 1ULL); 564 } 565 }; 566 567 struct CaseBits { 568 uint64_t Mask; 569 MachineBasicBlock* BB; 570 unsigned Bits; 571 572 CaseBits(uint64_t mask, MachineBasicBlock* bb, unsigned bits): 573 Mask(mask), BB(bb), Bits(bits) { } 574 }; 575 576 typedef std::vector<Case> CaseVector; 577 typedef std::vector<CaseBits> CaseBitsVector; 578 typedef CaseVector::iterator CaseItr; 579 typedef std::pair<CaseItr, CaseItr> CaseRange; 580 581 /// CaseRec - A struct with ctor used in lowering switches to a binary tree 582 /// of conditional branches. 583 struct CaseRec { 584 CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) : 585 CaseBB(bb), LT(lt), GE(ge), Range(r) {} 586 587 /// CaseBB - The MBB in which to emit the compare and branch 588 MachineBasicBlock *CaseBB; 589 /// LT, GE - If nonzero, we know the current case value must be less-than or 590 /// greater-than-or-equal-to these Constants. 591 Constant *LT; 592 Constant *GE; 593 /// Range - A pair of iterators representing the range of case values to be 594 /// processed at this point in the binary search tree. 595 CaseRange Range; 596 }; 597 598 typedef std::vector<CaseRec> CaseRecVector; 599 600 /// The comparison function for sorting the switch case values in the vector. 601 /// WARNING: Case ranges should be disjoint! 
602 struct CaseCmp { 603 bool operator () (const Case& C1, const Case& C2) { 604 assert(isa<ConstantInt>(C1.Low) && isa<ConstantInt>(C2.High)); 605 const ConstantInt* CI1 = cast<const ConstantInt>(C1.Low); 606 const ConstantInt* CI2 = cast<const ConstantInt>(C2.High); 607 return CI1->getValue().slt(CI2->getValue()); 608 } 609 }; 610 611 struct CaseBitsCmp { 612 bool operator () (const CaseBits& C1, const CaseBits& C2) { 613 return C1.Bits > C2.Bits; 614 } 615 }; 616 617 unsigned Clusterify(CaseVector& Cases, const SwitchInst &SI); 618 619 /// CaseBlock - This structure is used to communicate between SDLowering and 620 /// SDISel for the code generation of additional basic blocks needed by multi- 621 /// case switch statements. 622 struct CaseBlock { 623 CaseBlock(ISD::CondCode cc, Value *cmplhs, Value *cmprhs, Value *cmpmiddle, 624 MachineBasicBlock *truebb, MachineBasicBlock *falsebb, 625 MachineBasicBlock *me) 626 : CC(cc), CmpLHS(cmplhs), CmpMHS(cmpmiddle), CmpRHS(cmprhs), 627 TrueBB(truebb), FalseBB(falsebb), ThisBB(me) {} 628 // CC - the condition code to use for the case block's setcc node 629 ISD::CondCode CC; 630 // CmpLHS/CmpRHS/CmpMHS - The LHS/MHS/RHS of the comparison to emit. 631 // Emit by default LHS op RHS. MHS is used for range comparisons: 632 // If MHS is not null: (LHS <= MHS) and (MHS <= RHS). 633 Value *CmpLHS, *CmpMHS, *CmpRHS; 634 // TrueBB/FalseBB - the block to branch to if the setcc is true/false. 635 MachineBasicBlock *TrueBB, *FalseBB; 636 // ThisBB - the block into which to emit the code for the setcc and branches 637 MachineBasicBlock *ThisBB; 638 }; 639 struct JumpTable { 640 JumpTable(unsigned R, unsigned J, MachineBasicBlock *M, 641 MachineBasicBlock *D): Reg(R), JTI(J), MBB(M), Default(D) {} 642 643 /// Reg - the virtual register containing the index of the jump table entry 644 //. to jump to. 645 unsigned Reg; 646 /// JTI - the JumpTableIndex for this jump table in the function. 
647 unsigned JTI; 648 /// MBB - the MBB into which to emit the code for the indirect jump. 649 MachineBasicBlock *MBB; 650 /// Default - the MBB of the default bb, which is a successor of the range 651 /// check MBB. This is when updating PHI nodes in successors. 652 MachineBasicBlock *Default; 653 }; 654 struct JumpTableHeader { 655 JumpTableHeader(uint64_t F, uint64_t L, Value* SV, MachineBasicBlock* H, 656 bool E = false): 657 First(F), Last(L), SValue(SV), HeaderBB(H), Emitted(E) {} 658 uint64_t First; 659 uint64_t Last; 660 Value *SValue; 661 MachineBasicBlock *HeaderBB; 662 bool Emitted; 663 }; 664 typedef std::pair<JumpTableHeader, JumpTable> JumpTableBlock; 665 666 struct BitTestCase { 667 BitTestCase(uint64_t M, MachineBasicBlock* T, MachineBasicBlock* Tr): 668 Mask(M), ThisBB(T), TargetBB(Tr) { } 669 uint64_t Mask; 670 MachineBasicBlock* ThisBB; 671 MachineBasicBlock* TargetBB; 672 }; 673 674 typedef SmallVector<BitTestCase, 3> BitTestInfo; 675 676 struct BitTestBlock { 677 BitTestBlock(uint64_t F, uint64_t R, Value* SV, 678 unsigned Rg, bool E, 679 MachineBasicBlock* P, MachineBasicBlock* D, 680 const BitTestInfo& C): 681 First(F), Range(R), SValue(SV), Reg(Rg), Emitted(E), 682 Parent(P), Default(D), Cases(C) { } 683 uint64_t First; 684 uint64_t Range; 685 Value *SValue; 686 unsigned Reg; 687 bool Emitted; 688 MachineBasicBlock *Parent; 689 MachineBasicBlock *Default; 690 BitTestInfo Cases; 691 }; 692 693public: 694 // TLI - This is information that describes the available target features we 695 // need for lowering. This indicates when operations are unavailable, 696 // implemented with a libcall, etc. 697 TargetLowering &TLI; 698 SelectionDAG &DAG; 699 const TargetData *TD; 700 AliasAnalysis *AA; 701 702 /// SwitchCases - Vector of CaseBlock structures used to communicate 703 /// SwitchInst code generation information. 
704 std::vector<CaseBlock> SwitchCases; 705 /// JTCases - Vector of JumpTable structures used to communicate 706 /// SwitchInst code generation information. 707 std::vector<JumpTableBlock> JTCases; 708 /// BitTestCases - Vector of BitTestBlock structures used to communicate 709 /// SwitchInst code generation information. 710 std::vector<BitTestBlock> BitTestCases; 711 712 std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate; 713 714 // Emit PHI-node-operand constants only once even if used by multiple 715 // PHI nodes. 716 DenseMap<Constant*, unsigned> ConstantsOut; 717 718 /// FuncInfo - Information about the function as a whole. 719 /// 720 FunctionLoweringInfo &FuncInfo; 721 722 /// GFI - Garbage collection metadata for the function. 723 GCFunctionInfo *GFI; 724 725 SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli, 726 FunctionLoweringInfo &funcinfo) 727 : TLI(tli), DAG(dag), FuncInfo(funcinfo) { 728 } 729 730 void init(GCFunctionInfo *gfi, AliasAnalysis &aa) { 731 AA = &aa; 732 GFI = gfi; 733 TD = DAG.getTarget().getTargetData(); 734 } 735 736 /// clear - Clear out the curret SelectionDAG and the associated 737 /// state and prepare this SelectionDAGLowering object to be used 738 /// for a new block. This doesn't clear out information about 739 /// additional blocks that are needed to complete switch lowering 740 /// or PHI node updating; that information is cleared out as it is 741 /// consumed. 742 void clear() { 743 NodeMap.clear(); 744 PendingLoads.clear(); 745 PendingExports.clear(); 746 DAG.clear(); 747 } 748 749 /// getRoot - Return the current virtual root of the Selection DAG, 750 /// flushing any PendingLoad items. This must be done before emitting 751 /// a store or any other node that may need to be ordered after any 752 /// prior load instructions. 
  ///
  SDValue getRoot() {
    // Nothing pending: the DAG root is already correct.
    if (PendingLoads.empty())
      return DAG.getRoot();

    // A single pending load can become the root directly.
    if (PendingLoads.size() == 1) {
      SDValue Root = PendingLoads[0];
      DAG.setRoot(Root);
      PendingLoads.clear();
      return Root;
    }

    // Otherwise, we have to make a token factor node.
    SDValue Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                               &PendingLoads[0], PendingLoads.size());
    PendingLoads.clear();
    DAG.setRoot(Root);
    return Root;
  }

  /// getControlRoot - Similar to getRoot, but instead of flushing all the
  /// PendingLoad items, flush all the PendingExports items. It is necessary
  /// to do this before emitting a terminator instruction.
  ///
  SDValue getControlRoot() {
    SDValue Root = DAG.getRoot();

    if (PendingExports.empty())
      return Root;

    // Turn all of the CopyToReg chains into one factored node.
    if (Root.getOpcode() != ISD::EntryToken) {
      unsigned i = 0, e = PendingExports.size();
      for (; i != e; ++i) {
        assert(PendingExports[i].getNode()->getNumOperands() > 1);
        if (PendingExports[i].getNode()->getOperand(0) == Root)
          break;  // Don't add the root if we already indirectly depend on it.
      }

      if (i == e)
        PendingExports.push_back(Root);
    }

    Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                       &PendingExports[0],
                       PendingExports.size());
    PendingExports.clear();
    DAG.setRoot(Root);
    return Root;
  }

  /// CopyValueToVirtualRegister - Emit a CopyToReg of V into virtual
  /// register Reg (defined elsewhere in this file).
  void CopyValueToVirtualRegister(Value *V, unsigned Reg);

  /// visit - Dispatch on the instruction's opcode.
  void visit(Instruction &I) { visit(I.getOpcode(), I); }

  void visit(unsigned Opcode, User &I) {
    // Note: this doesn't use InstVisitor, because it has to work with
    // ConstantExpr's in addition to instructions.
    switch (Opcode) {
    default: assert(0 && "Unknown instruction type encountered!");
             abort();
      // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
    }
  }

  void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }

  /// getValue - Return the SDValue for V, materializing it if necessary
  /// (defined later in this file).
  SDValue getValue(const Value *V);

  /// setValue - Record NewN as the SDValue for V. A value may be set at
  /// most once; the assert catches accidental double-lowering.
  void setValue(const Value *V, SDValue NewN) {
    SDValue &N = NodeMap[V];
    assert(N.getNode() == 0 && "Already set a value for this node!");
    N = NewN;
  }

  void GetRegistersForValue(SDISelAsmOperandInfo &OpInfo, bool HasEarlyClobber,
                            std::set<unsigned> &OutputRegs,
                            std::set<unsigned> &InputRegs);

  void FindMergedConditions(Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            unsigned Opc);
  bool ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases);
  bool isExportableFromCurrentBlock(Value *V, const BasicBlock *FromBB);
  void ExportFromCurrentBlock(Value *V);
  void LowerCallTo(CallSite CS, SDValue Callee, bool IsTailCall,
                   MachineBasicBlock *LandingPad = NULL);

  // Terminator instructions.
  void visitRet(ReturnInst &I);
  void visitBr(BranchInst &I);
  void visitSwitch(SwitchInst &I);
  void visitUnreachable(UnreachableInst &I) { /* noop */ }

  // Helpers for visitSwitch
  bool handleSmallSwitchRange(CaseRec& CR,
                              CaseRecVector& WorkList,
                              Value* SV,
                              MachineBasicBlock* Default);
  bool handleJTSwitchCase(CaseRec& CR,
                          CaseRecVector& WorkList,
                          Value* SV,
                          MachineBasicBlock* Default);
  bool handleBTSplitSwitchCase(CaseRec& CR,
                               CaseRecVector& WorkList,
                               Value* SV,
                               MachineBasicBlock* Default);
  bool handleBitTestsSwitchCase(CaseRec& CR,
                                CaseRecVector& WorkList,
                                Value* SV,
                                MachineBasicBlock* Default);
  void visitSwitchCase(CaseBlock &CB);
  void visitBitTestHeader(BitTestBlock &B);
  void visitBitTestCase(MachineBasicBlock* NextMBB,
                        unsigned Reg,
                        BitTestCase &B);
  void visitJumpTable(JumpTable &JT);
  void visitJumpTableHeader(JumpTable &JT, JumpTableHeader &JTH);

  // These all get lowered before this pass.
  void visitInvoke(InvokeInst &I);
  void visitUnwind(UnwindInst &I);

  void visitBinary(User &I, unsigned OpCode);
  void visitShift(User &I, unsigned Opcode);
  void visitAdd(User &I) {
    // Select the FP or integer opcode based on the result type.
    if (I.getType()->isFPOrFPVector())
      visitBinary(I, ISD::FADD);
    else
      visitBinary(I, ISD::ADD);
  }
  void visitSub(User &I);
  void visitMul(User &I) {
    if (I.getType()->isFPOrFPVector())
      visitBinary(I, ISD::FMUL);
    else
      visitBinary(I, ISD::MUL);
  }
  void visitURem(User &I) { visitBinary(I, ISD::UREM); }
  void visitSRem(User &I) { visitBinary(I, ISD::SREM); }
  void visitFRem(User &I) { visitBinary(I, ISD::FREM); }
  void visitUDiv(User &I) { visitBinary(I, ISD::UDIV); }
  void visitSDiv(User &I) { visitBinary(I, ISD::SDIV); }
  void visitFDiv(User &I) { visitBinary(I, ISD::FDIV); }
  void visitAnd (User &I) { visitBinary(I, ISD::AND); }
  void visitOr  (User &I) { visitBinary(I, ISD::OR); }
  void visitXor (User &I) { visitBinary(I, ISD::XOR); }
  void visitShl (User &I) { visitShift(I, ISD::SHL); }
  void visitLShr(User &I) { visitShift(I, ISD::SRL); }
  void visitAShr(User &I) { visitShift(I, ISD::SRA); }
  void visitICmp(User &I);
  void visitFCmp(User &I);
  void visitVICmp(User &I);
  void visitVFCmp(User &I);
  // Visit the conversion instructions
  void visitTrunc(User &I);
  void visitZExt(User &I);
  void visitSExt(User &I);
  void visitFPTrunc(User &I);
  void visitFPExt(User &I);
  void visitFPToUI(User &I);
  void visitFPToSI(User &I);
  void visitUIToFP(User &I);
  void visitSIToFP(User &I);
  void visitPtrToInt(User &I);
  void visitIntToPtr(User &I);
  void visitBitCast(User &I);

  void visitExtractElement(User &I);
  void visitInsertElement(User &I);
  void visitShuffleVector(User &I);

  void visitExtractValue(ExtractValueInst &I);
  void visitInsertValue(InsertValueInst &I);

  void visitGetElementPtr(User &I);
  void visitSelect(User &I);

  void visitMalloc(MallocInst &I);
  void visitFree(FreeInst &I);
  void visitAlloca(AllocaInst &I);
  void visitLoad(LoadInst &I);
  void visitStore(StoreInst &I);
  void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
  void visitCall(CallInst &I);
  void visitInlineAsm(CallSite CS);
  const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic);

  void visitVAStart(CallInst &I);
  void visitVAArg(VAArgInst &I);
  void visitVAEnd(CallInst &I);
  void visitVACopy(CallInst &I);

  void visitUserOp1(Instruction &I) {
    assert(0 && "UserOp1 should not exist at instruction selection time!");
    abort();
  }
  void visitUserOp2(Instruction &I) {
    assert(0 && "UserOp2 should not exist at instruction selection time!");
    abort();
  }

private:
  inline const char *implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op);

};
} // end namespace llvm


/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG,
                                const SDValue *Parts,
                                unsigned NumParts,
                                MVT PartVT,
                                MVT ValueVT,
                                ISD::NodeType AssertOp = ISD::DELETED_NODE) {
  assert(NumParts > 0 && "No parts to assemble!");
  TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (!ValueVT.isVector()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.  RoundParts is the largest power of
      // two <= NumParts (NumParts itself when it is already a power of two).
      unsigned RoundParts = NumParts & (NumParts - 1) ?
        1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      MVT RoundVT = RoundBits == ValueBits ?
        ValueVT : MVT::getIntegerVT(RoundBits);
      SDValue Lo, Hi;

      if (RoundParts > 2) {
        // Recursively assemble each half of the power-of-two portion.
        MVT HalfVT = MVT::getIntegerVT(RoundBits/2);
        Lo = getCopyFromParts(DAG, Parts, RoundParts/2, PartVT, HalfVT);
        Hi = getCopyFromParts(DAG, Parts+RoundParts/2, RoundParts/2,
                              PartVT, HalfVT);
      } else {
        Lo = Parts[0];
        Hi = Parts[1];
      }
      if (TLI.isBigEndian())
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        MVT OddVT = MVT::getIntegerVT(OddParts * PartBits);
        Hi = getCopyFromParts(DAG, Parts+RoundParts, OddParts, PartVT, OddVT);

        // Combine the round and odd parts: OR the zero-extended round part
        // with the odd part shifted into the high bits.
        Lo = Val;
        if (TLI.isBigEndian())
          std::swap(Lo, Hi);
        MVT TotalVT = MVT::getIntegerVT(NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueType().getSizeInBits(),
                                         TLI.getShiftAmountTy()));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, TotalVT, Lo, Hi);
      }
    } else {
      // Handle a multi-element vector.
      MVT IntermediateVT, RegisterVT;
      unsigned NumIntermediates;
      unsigned NumRegs =
        TLI.getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
                                   RegisterVT);
      assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
      NumParts = NumRegs; // Silence a compiler warning.
      assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
      assert(RegisterVT == Parts[0].getValueType() &&
             "Part type doesn't match part!");

      // Assemble the parts into intermediate operands.
      SmallVector<SDValue, 8> Ops(NumIntermediates);
      if (NumIntermediates == NumParts) {
        // If the register was not expanded, truncate or copy the value,
        // as appropriate.
        for (unsigned i = 0; i != NumParts; ++i)
          Ops[i] = getCopyFromParts(DAG, &Parts[i], 1,
                                    PartVT, IntermediateVT);
      } else if (NumParts > 0) {
        // If the intermediate type was expanded, build the intermediate operands
        // from the parts.
        assert(NumParts % NumIntermediates == 0 &&
               "Must expand into a divisible number of parts!");
        unsigned Factor = NumParts / NumIntermediates;
        for (unsigned i = 0; i != NumIntermediates; ++i)
          Ops[i] = getCopyFromParts(DAG, &Parts[i * Factor], Factor,
                                    PartVT, IntermediateVT);
      }

      // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate
      // operands.
      Val = DAG.getNode(IntermediateVT.isVector() ?
                        ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR,
                        ValueVT, &Ops[0], NumIntermediates);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  PartVT = Val.getValueType();

  if (PartVT == ValueVT)
    return Val;

  if (PartVT.isVector()) {
    assert(ValueVT.isVector() && "Unknown vector conversion!");
    return DAG.getNode(ISD::BIT_CONVERT, ValueVT, Val);
  }

  if (ValueVT.isVector()) {
    assert(ValueVT.getVectorElementType() == PartVT &&
           ValueVT.getVectorNumElements() == 1 &&
           "Only trivial scalar-to-vector conversions should get here!");
    return DAG.getNode(ISD::BUILD_VECTOR, ValueVT, Val);
  }

  if (PartVT.isInteger() &&
      ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp != ISD::DELETED_NODE)
        Val = DAG.getNode(AssertOp, PartVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
    } else {
      return DAG.getNode(ISD::ANY_EXTEND, ValueVT, Val);
    }
  }

  if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    if (ValueVT.bitsLT(Val.getValueType()))
      // FP_ROUND's are always exact here.
      return DAG.getNode(ISD::FP_ROUND, ValueVT, Val,
                         DAG.getIntPtrConstant(1));
    return DAG.getNode(ISD::FP_EXTEND, ValueVT, Val);
  }

  if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BIT_CONVERT, ValueVT, Val);

  assert(0 && "Unknown mismatch!");
  return SDValue();
}

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG,
                           SDValue Val,
                           SDValue *Parts,
                           unsigned NumParts,
                           MVT PartVT,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  TargetLowering &TLI = DAG.getTargetLoweringInfo();
  MVT PtrVT = TLI.getPointerTy();
  MVT ValueVT = Val.getValueType();
  unsigned PartBits = PartVT.getSizeInBits();
  assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");

  if (!NumParts)
    return;

  if (!ValueVT.isVector()) {
    if (PartVT == ValueVT) {
      assert(NumParts == 1 && "No-op copy with multiple parts!");
      Parts[0] = Val;
      return;
    }

    if (NumParts * PartBits > ValueVT.getSizeInBits()) {
      // If the parts cover more bits than the value has, promote the value.
      if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
        assert(NumParts == 1 && "Do not know what to promote to!");
        Val = DAG.getNode(ISD::FP_EXTEND, PartVT, Val);
      } else if (PartVT.isInteger() && ValueVT.isInteger()) {
        ValueVT = MVT::getIntegerVT(NumParts * PartBits);
        Val = DAG.getNode(ExtendKind, ValueVT, Val);
      } else {
        assert(0 && "Unknown mismatch!");
      }
    } else if (PartBits == ValueVT.getSizeInBits()) {
      // Different types of the same size.
      assert(NumParts == 1 && PartVT != ValueVT);
      Val = DAG.getNode(ISD::BIT_CONVERT, PartVT, Val);
    } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
      // If the parts cover less bits than value has, truncate the value.
      if (PartVT.isInteger() && ValueVT.isInteger()) {
        ValueVT = MVT::getIntegerVT(NumParts * PartBits);
        Val = DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
      } else {
        assert(0 && "Unknown mismatch!");
      }
    }

    // The value may have changed - recompute ValueVT.
    ValueVT = Val.getValueType();
    assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
           "Failed to tile the value with PartVT!");

    if (NumParts == 1) {
      assert(PartVT == ValueVT && "Type conversion failed!");
      Parts[0] = Val;
      return;
    }

    // Expand the value into multiple parts.
    if (NumParts & (NumParts - 1)) {
      // The number of parts is not a power of 2.  Split off and copy the tail.
      assert(PartVT.isInteger() && ValueVT.isInteger() &&
             "Do not know what to expand to!");
      unsigned RoundParts = 1 << Log2_32(NumParts);
      unsigned RoundBits = RoundParts * PartBits;
      unsigned OddParts = NumParts - RoundParts;
      // Shift the tail bits down and copy them out recursively.
      SDValue OddVal = DAG.getNode(ISD::SRL, ValueVT, Val,
                                   DAG.getConstant(RoundBits,
                                                   TLI.getShiftAmountTy()));
      getCopyToParts(DAG, OddVal, Parts + RoundParts, OddParts, PartVT);
      if (TLI.isBigEndian())
        // The odd parts were reversed by getCopyToParts - unreverse them.
        std::reverse(Parts + RoundParts, Parts + NumParts);
      NumParts = RoundParts;
      ValueVT = MVT::getIntegerVT(NumParts * PartBits);
      Val = DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
    }

    // The number of parts is a power of 2.  Repeatedly bisect the value using
    // EXTRACT_ELEMENT.
    Parts[0] = DAG.getNode(ISD::BIT_CONVERT,
                           MVT::getIntegerVT(ValueVT.getSizeInBits()),
                           Val);
    for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
      for (unsigned i = 0; i < NumParts; i += StepSize) {
        unsigned ThisBits = StepSize * PartBits / 2;
        MVT ThisVT = MVT::getIntegerVT (ThisBits);
        SDValue &Part0 = Parts[i];
        SDValue &Part1 = Parts[i+StepSize/2];

        Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, ThisVT, Part0,
                            DAG.getConstant(1, PtrVT));
        Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, ThisVT, Part0,
                            DAG.getConstant(0, PtrVT));

        if (ThisBits == PartBits && ThisVT != PartVT) {
          Part0 = DAG.getNode(ISD::BIT_CONVERT, PartVT, Part0);
          Part1 = DAG.getNode(ISD::BIT_CONVERT, PartVT, Part1);
        }
      }
    }

    if (TLI.isBigEndian())
      std::reverse(Parts, Parts + NumParts);

    return;
  }

  // Vector ValueVT.
  if (NumParts == 1) {
    if (PartVT != ValueVT) {
      if (PartVT.isVector()) {
        Val = DAG.getNode(ISD::BIT_CONVERT, PartVT, Val);
      } else {
        assert(ValueVT.getVectorElementType() == PartVT &&
               ValueVT.getVectorNumElements() == 1 &&
               "Only trivial vector-to-scalar conversions should get here!");
        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, PartVT, Val,
                          DAG.getConstant(0, PtrVT));
      }
    }

    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  MVT IntermediateVT, RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs =
    DAG.getTargetLoweringInfo()
       .getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
                               RegisterVT);
  unsigned NumElements = ValueVT.getVectorNumElements();

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i)
    if (IntermediateVT.isVector())
      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR,
                           IntermediateVT, Val,
                           DAG.getConstant(i * (NumElements / NumIntermediates),
                                           PtrVT));
    else
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT,
                           IntermediateVT, Val,
                           DAG.getConstant(i, PtrVT));

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, Ops[i], &Parts[i], 1, PartVT);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each the value into
    // legal parts.
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, Ops[i], &Parts[i * Factor], Factor, PartVT);
  }
}


/// getValue - Return the SDValue for V, materializing constants, static
/// allocas, and cross-block values (via CopyFromReg) on first use.
SDValue SelectionDAGLowering::getValue(const Value *V) {
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
    MVT VT = TLI.getValueType(V->getType(), true);

    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return N = DAG.getConstant(CI->getValue(), VT);

    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return N = DAG.getGlobalAddress(GV, VT);

    if (isa<ConstantPointerNull>(C))
      return N = DAG.getConstant(0, TLI.getPointerTy());

    if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return N = DAG.getConstantFP(CFP->getValueAPF(), VT);

    if (isa<UndefValue>(C) && !isa<VectorType>(V->getType()) &&
        !V->getType()->isAggregateType())
      return N = DAG.getNode(ISD::UNDEF, VT);

    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      // Lower the constant expression as if it were an instruction; the
      // visit populates NodeMap[V].
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the ValueMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }
      return DAG.getMergeValues(&Constants[0], Constants.size());
    }

    if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<MVT, 4> ValueVTs;
      ComputeValueVTs(TLI, C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        MVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getNode(ISD::UNDEF, EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, EltVT);
        else
          Constants[i] = DAG.getConstant(0, EltVT);
      }
      return DAG.getMergeValues(&Constants[0], NumElts);
    }

    const VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CP->getOperand(i)));
    } else {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown vector constant!");
      MVT EltVT = TLI.getValueType(VecTy->getElementType());

      SDValue Op;
      if (isa<UndefValue>(C))
        Op = DAG.getNode(ISD::UNDEF, EltVT);
      else if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, EltVT);
      else
        Op = DAG.getConstant(0, EltVT);
      Ops.assign(NumElements, Op);
    }

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size());
  }

  // If this is a static alloca, generate it as the frameindex instead of
  // computation.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
  }

  // Otherwise the value was exported from another block; copy it from the
  // virtual register assigned in FuncInfo.ValueMap.
  unsigned InReg = FuncInfo.ValueMap[V];
  assert(InReg && "Value not in map!");

  RegsForValue RFV(TLI, InReg, V->getType());
  SDValue Chain = DAG.getEntryNode();
  return RFV.getCopyFromRegs(DAG, Chain, NULL);
}


/// visitRet - Lower a return instruction to an ISD::RET node, splitting each
/// returned value into the legal register parts for the target.
void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    // Void return: just chain the RET to the control root.
    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getControlRoot()));
    return;
  }

  SmallVector<SDValue, 8> NewValues;
  NewValues.push_back(getControlRoot());
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    SDValue RetOp = getValue(I.getOperand(i));

    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
    for (unsigned j = 0, f = ValueVTs.size(); j != f; ++j) {
      MVT VT = ValueVTs[j];

      // FIXME: C calling convention requires the return type to be promoted to
      // at least 32-bit. But this is not necessary for non-C calling conventions.
      if (VT.isInteger()) {
        MVT MinVT = TLI.getRegisterType(MVT::i32);
        if (VT.bitsLT(MinVT))
          VT = MinVT;
      }

      unsigned NumParts = TLI.getNumRegisters(VT);
      MVT PartVT = TLI.getRegisterType(VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

      // Honor the sext/zext attribute on the function's return value.
      const Function *F = I.getParent()->getParent();
      if (F->paramHasAttr(0, ParamAttr::SExt))
        ExtendKind = ISD::SIGN_EXTEND;
      else if (F->paramHasAttr(0, ParamAttr::ZExt))
        ExtendKind = ISD::ZERO_EXTEND;

      getCopyToParts(DAG, SDValue(RetOp.getNode(), RetOp.getResNo() + j),
                     &Parts[0], NumParts, PartVT, ExtendKind);

      // Each part is paired with its argument flags operand.
      for (unsigned i = 0; i < NumParts; ++i) {
        NewValues.push_back(Parts[i]);
        NewValues.push_back(DAG.getArgFlags(ISD::ArgFlagsTy()));
      }
    }
  }
  DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other,
                          &NewValues[0], NewValues.size()));
}

/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg.
void SelectionDAGLowering::ExportFromCurrentBlock(Value *V) {
  // No need to export constants.
  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;

  // Already exported?
  if (FuncInfo.isExportedInst(V)) return;

  unsigned Reg = FuncInfo.InitializeRegForValue(V);
  CopyValueToVirtualRegister(V, Reg);
}

/// isExportableFromCurrentBlock - Return true if V can be used from a block
/// other than FromBB (it is defined in FromBB, already exported, or a
/// constant/entry-block argument).
bool SelectionDAGLowering::isExportableFromCurrentBlock(Value *V,
                                                        const BasicBlock *FromBB) {
  // The operands of the setcc have to be in this block.  We don't know
  // how to export them from some other block.
  if (Instruction *VI = dyn_cast<Instruction>(V)) {
    // Can export from current BB.
    if (VI->getParent() == FromBB)
      return true;

    // Is already exported, noop.
    return FuncInfo.isExportedInst(V);
  }

  // If this is an argument, we can export it if the BB is the entry block or
  // if it is already exported.
  if (isa<Argument>(V)) {
    if (FromBB == &FromBB->getParent()->getEntryBlock())
      return true;

    // Otherwise, can only export this if it is already exported.
    return FuncInfo.isExportedInst(V);
  }

  // Otherwise, constants can always be exported.
  return true;
}

/// InBlock - Return true if V is a constant or an instruction defined in BB.
static bool InBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

/// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y)
/// whose opcode matches Opc, recursively emit the subtree as a sequence of
/// conditional branches (CaseBlocks); otherwise emit Cond itself as a single
/// branch leaf.
void SelectionDAGLowering::FindMergedConditions(Value *Cond,
                                                MachineBasicBlock *TBB,
                                                MachineBasicBlock *FBB,
                                                MachineBasicBlock *CurBB,
                                                unsigned Opc) {
  // If this node is not part of the or/and tree, emit it as a branch.
  Instruction *BOp = dyn_cast<Instruction>(Cond);

  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
      (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    const BasicBlock *BB = CurBB->getBasicBlock();

    // If the leaf of the tree is a comparison, merge the condition into
    // the caseblock.
    if ((isa<ICmpInst>(Cond) || isa<FCmpInst>(Cond)) &&
        // The operands of the cmp have to be in this block.  We don't know
        // how to export them from some other block.  If this is the first block
        // of the sequence, no exporting is needed.
        (CurBB == CurMBB ||
         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
          isExportableFromCurrentBlock(BOp->getOperand(1), BB)))) {
      BOp = cast<Instruction>(Cond);
      ISD::CondCode Condition;
      if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
        // Map the IR integer predicate onto the ISD condition code.
        switch (IC->getPredicate()) {
        default: assert(0 && "Unknown icmp predicate opcode!");
        case ICmpInst::ICMP_EQ:  Condition = ISD::SETEQ;  break;
        case ICmpInst::ICMP_NE:  Condition = ISD::SETNE;  break;
        case ICmpInst::ICMP_SLE: Condition = ISD::SETLE;  break;
        case ICmpInst::ICMP_ULE: Condition = ISD::SETULE; break;
        case ICmpInst::ICMP_SGE: Condition = ISD::SETGE;  break;
        case ICmpInst::ICMP_UGE: Condition = ISD::SETUGE; break;
        case ICmpInst::ICMP_SLT: Condition = ISD::SETLT;  break;
        case ICmpInst::ICMP_ULT: Condition = ISD::SETULT; break;
        case ICmpInst::ICMP_SGT: Condition = ISD::SETGT;  break;
        case ICmpInst::ICMP_UGT: Condition = ISD::SETUGT; break;
        }
      } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
        // FOC is the ordered-ignoring variant used under FiniteOnlyFPMath;
        // FPC is the fully precise FP condition code.
        ISD::CondCode FPC, FOC;
        switch (FC->getPredicate()) {
        default: assert(0 && "Unknown fcmp predicate opcode!");
        case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
        case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
        case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
        case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
        case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
        case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
        case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
        case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
        case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
        case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
        case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
        case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
        case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
        case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
        case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
        case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
        }
        if (FiniteOnlyFPMath())
          Condition = FOC;
        else
          Condition = FPC;
      } else {
        Condition = ISD::SETEQ; // silence warning.
        assert(0 && "Unknown compare instruction");
      }

      CaseBlock CB(Condition, BOp->getOperand(0),
                   BOp->getOperand(1), NULL, TBB, FBB, CurBB);
      SwitchCases.push_back(CB);
      return;
    }

    // Create a CaseBlock record representing this branch.
    CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(),
                 NULL, TBB, FBB, CurBB);
    SwitchCases.push_back(CB);
    return;
  }


  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI = CurBB;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    //  This requires creation of TmpBB after CurBB.

    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
  }
}

/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
SelectionDAGLowering::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
  if (Cases.size() != 2) return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  return true;
}

/// visitBr - Lower a branch instruction, merging and/or condition trees into
/// branch sequences where profitable.
void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // Update machine-CFG edges.
    CurMBB->addSuccessor(Succ0MBB);

    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(Succ0MBB)));
    return;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  Value *CondVal = I.getCondition();
  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  //
  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
    if (BOp->hasOneUse() &&
        (BOp->getOpcode() == Instruction::And ||
         BOp->getOpcode() == Instruction::Or)) {
      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
      // If the compares in later blocks need to use values not currently
      // exported from this block, export them now.  This block should always
      // be the first entry.
      assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (ShouldEmitAsBranches(SwitchCases)) {
        for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
          ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
          ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
        }

        // Emit the branch for this block.
        visitSwitchCase(SwitchCases[0]);
        SwitchCases.erase(SwitchCases.begin());
        return;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
        CurMBB->getParent()->erase(SwitchCases[i].ThisBB);

      SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(),
               NULL, Succ0MBB, Succ1MBB, CurMBB);
  // Use visitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  visitSwitchCase(CB);
}

/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
/// Builds the comparison, updates the machine CFG, and emits the pair of
/// branches (eliding whichever one falls through to the next block).
void SelectionDAGLowering::visitSwitchCase(CaseBlock &CB) {
  SDValue Cond;
  SDValue CondLHS = getValue(CB.CmpLHS);

  // Build the setcc now.
  if (CB.CmpMHS == NULL) {
    // Two-operand compare.
    // Fold "(X == true)" to X and "(X == false)" to !X to
    // handle common cases produced by branch lowering.
    if (CB.CmpRHS == ConstantInt::getTrue() && CB.CC == ISD::SETEQ)
      Cond = CondLHS;
    else if (CB.CmpRHS == ConstantInt::getFalse() && CB.CC == ISD::SETEQ) {
      // !X is emitted as X xor 1.
      SDValue True = DAG.getConstant(1, CondLHS.getValueType());
      Cond = DAG.getNode(ISD::XOR, CondLHS.getValueType(), CondLHS, True);
    } else
      Cond = DAG.getSetCC(MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
  } else {
    // Range compare: Low <= MHS <= High, encoded as an LE range.
    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");

    uint64_t Low = cast<ConstantInt>(CB.CmpLHS)->getSExtValue();
    uint64_t High  = cast<ConstantInt>(CB.CmpRHS)->getSExtValue();

    SDValue CmpOp = getValue(CB.CmpMHS);
    MVT VT = CmpOp.getValueType();

    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      // Lower bound is the signed minimum: only the upper bound needs a test.
      Cond = DAG.getSetCC(MVT::i1, CmpOp, DAG.getConstant(High, VT), ISD::SETLE);
    } else {
      // Rebase to zero and do a single unsigned compare against the span.
      SDValue SUB = DAG.getNode(ISD::SUB, VT, CmpOp, DAG.getConstant(Low, VT));
      Cond = DAG.getSetCC(MVT::i1, SUB,
                          DAG.getConstant(High-Low, VT), ISD::SETULE);
    }
  }

  // Update successor info
  CurMBB->addSuccessor(CB.TrueBB);
  CurMBB->addSuccessor(CB.FalseBB);

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == NextBlock) {
    std::swap(CB.TrueBB, CB.FalseBB);
    SDValue True = DAG.getConstant(1, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
  }
  SDValue BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getControlRoot(), Cond,
                               DAG.getBasicBlock(CB.TrueBB));

  // If the branch was constant folded (BRCOND collapsed to a plain BR),
  // fix up the CFG: the false edge can no longer be taken.
  if (BrCond.getOpcode() == ISD::BR) {
    CurMBB->removeSuccessor(CB.FalseBB);
    DAG.setRoot(BrCond);
  } else {
    // Otherwise, go ahead and insert the false branch.
    // BrCond == control root means the conditional branch folded away
    // entirely, so the true edge is dead.
    if (BrCond == getControlRoot())
      CurMBB->removeSuccessor(CB.TrueBB);

    if (CB.FalseBB == NextBlock)
      DAG.setRoot(BrCond);
    else
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond,
                              DAG.getBasicBlock(CB.FalseBB)));
  }
}
1763 if (CB.TrueBB == NextBlock) { 1764 std::swap(CB.TrueBB, CB.FalseBB); 1765 SDValue True = DAG.getConstant(1, Cond.getValueType()); 1766 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True); 1767 } 1768 SDValue BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getControlRoot(), Cond, 1769 DAG.getBasicBlock(CB.TrueBB)); 1770 1771 // If the branch was constant folded, fix up the CFG. 1772 if (BrCond.getOpcode() == ISD::BR) { 1773 CurMBB->removeSuccessor(CB.FalseBB); 1774 DAG.setRoot(BrCond); 1775 } else { 1776 // Otherwise, go ahead and insert the false branch. 1777 if (BrCond == getControlRoot()) 1778 CurMBB->removeSuccessor(CB.TrueBB); 1779 1780 if (CB.FalseBB == NextBlock) 1781 DAG.setRoot(BrCond); 1782 else 1783 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond, 1784 DAG.getBasicBlock(CB.FalseBB))); 1785 } 1786} 1787 1788/// visitJumpTable - Emit JumpTable node in the current MBB 1789void SelectionDAGLowering::visitJumpTable(JumpTable &JT) { 1790 // Emit the code for the jump table 1791 assert(JT.Reg != -1U && "Should lower JT Header first!"); 1792 MVT PTy = TLI.getPointerTy(); 1793 SDValue Index = DAG.getCopyFromReg(getControlRoot(), JT.Reg, PTy); 1794 SDValue Table = DAG.getJumpTable(JT.JTI, PTy); 1795 DAG.setRoot(DAG.getNode(ISD::BR_JT, MVT::Other, Index.getValue(1), 1796 Table, Index)); 1797 return; 1798} 1799 1800/// visitJumpTableHeader - This function emits necessary code to produce index 1801/// in the JumpTable from switch case. 1802void SelectionDAGLowering::visitJumpTableHeader(JumpTable &JT, 1803 JumpTableHeader &JTH) { 1804 // Subtract the lowest switch case value from the value being switched on 1805 // and conditional branch to default mbb if the result is greater than the 1806 // difference between smallest and largest cases. 
1807 SDValue SwitchOp = getValue(JTH.SValue); 1808 MVT VT = SwitchOp.getValueType(); 1809 SDValue SUB = DAG.getNode(ISD::SUB, VT, SwitchOp, 1810 DAG.getConstant(JTH.First, VT)); 1811 1812 // The SDNode we just created, which holds the value being switched on 1813 // minus the the smallest case value, needs to be copied to a virtual 1814 // register so it can be used as an index into the jump table in a 1815 // subsequent basic block. This value may be smaller or larger than the 1816 // target's pointer type, and therefore require extension or truncating. 1817 if (VT.bitsGT(TLI.getPointerTy())) 1818 SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB); 1819 else 1820 SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB); 1821 1822 unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy()); 1823 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), JumpTableReg, SwitchOp); 1824 JT.Reg = JumpTableReg; 1825 1826 // Emit the range check for the jump table, and branch to the default 1827 // block for the switch statement if the value being switched on exceeds 1828 // the largest case in the switch. 1829 SDValue CMP = DAG.getSetCC(TLI.getSetCCResultType(SUB), SUB, 1830 DAG.getConstant(JTH.Last-JTH.First,VT), 1831 ISD::SETUGT); 1832 1833 // Set NextBlock to be the MBB immediately after the current one, if any. 1834 // This is used to avoid emitting unnecessary branches to the next block. 
1835 MachineBasicBlock *NextBlock = 0; 1836 MachineFunction::iterator BBI = CurMBB; 1837 if (++BBI != CurMBB->getParent()->end()) 1838 NextBlock = BBI; 1839 1840 SDValue BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP, 1841 DAG.getBasicBlock(JT.Default)); 1842 1843 if (JT.MBB == NextBlock) 1844 DAG.setRoot(BrCond); 1845 else 1846 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond, 1847 DAG.getBasicBlock(JT.MBB))); 1848 1849 return; 1850} 1851 1852/// visitBitTestHeader - This function emits necessary code to produce value 1853/// suitable for "bit tests" 1854void SelectionDAGLowering::visitBitTestHeader(BitTestBlock &B) { 1855 // Subtract the minimum value 1856 SDValue SwitchOp = getValue(B.SValue); 1857 MVT VT = SwitchOp.getValueType(); 1858 SDValue SUB = DAG.getNode(ISD::SUB, VT, SwitchOp, 1859 DAG.getConstant(B.First, VT)); 1860 1861 // Check range 1862 SDValue RangeCmp = DAG.getSetCC(TLI.getSetCCResultType(SUB), SUB, 1863 DAG.getConstant(B.Range, VT), 1864 ISD::SETUGT); 1865 1866 SDValue ShiftOp; 1867 if (VT.bitsGT(TLI.getShiftAmountTy())) 1868 ShiftOp = DAG.getNode(ISD::TRUNCATE, TLI.getShiftAmountTy(), SUB); 1869 else 1870 ShiftOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getShiftAmountTy(), SUB); 1871 1872 // Make desired shift 1873 SDValue SwitchVal = DAG.getNode(ISD::SHL, TLI.getPointerTy(), 1874 DAG.getConstant(1, TLI.getPointerTy()), 1875 ShiftOp); 1876 1877 unsigned SwitchReg = FuncInfo.MakeReg(TLI.getPointerTy()); 1878 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), SwitchReg, SwitchVal); 1879 B.Reg = SwitchReg; 1880 1881 // Set NextBlock to be the MBB immediately after the current one, if any. 1882 // This is used to avoid emitting unnecessary branches to the next block. 
1883 MachineBasicBlock *NextBlock = 0; 1884 MachineFunction::iterator BBI = CurMBB; 1885 if (++BBI != CurMBB->getParent()->end()) 1886 NextBlock = BBI; 1887 1888 MachineBasicBlock* MBB = B.Cases[0].ThisBB; 1889 1890 CurMBB->addSuccessor(B.Default); 1891 CurMBB->addSuccessor(MBB); 1892 1893 SDValue BrRange = DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, RangeCmp, 1894 DAG.getBasicBlock(B.Default)); 1895 1896 if (MBB == NextBlock) 1897 DAG.setRoot(BrRange); 1898 else 1899 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, CopyTo, 1900 DAG.getBasicBlock(MBB))); 1901 1902 return; 1903} 1904 1905/// visitBitTestCase - this function produces one "bit test" 1906void SelectionDAGLowering::visitBitTestCase(MachineBasicBlock* NextMBB, 1907 unsigned Reg, 1908 BitTestCase &B) { 1909 // Emit bit tests and jumps 1910 SDValue SwitchVal = DAG.getCopyFromReg(getControlRoot(), Reg, 1911 TLI.getPointerTy()); 1912 1913 SDValue AndOp = DAG.getNode(ISD::AND, TLI.getPointerTy(), SwitchVal, 1914 DAG.getConstant(B.Mask, TLI.getPointerTy())); 1915 SDValue AndCmp = DAG.getSetCC(TLI.getSetCCResultType(AndOp), AndOp, 1916 DAG.getConstant(0, TLI.getPointerTy()), 1917 ISD::SETNE); 1918 1919 CurMBB->addSuccessor(B.TargetBB); 1920 CurMBB->addSuccessor(NextMBB); 1921 1922 SDValue BrAnd = DAG.getNode(ISD::BRCOND, MVT::Other, getControlRoot(), 1923 AndCmp, DAG.getBasicBlock(B.TargetBB)); 1924 1925 // Set NextBlock to be the MBB immediately after the current one, if any. 1926 // This is used to avoid emitting unnecessary branches to the next block. 1927 MachineBasicBlock *NextBlock = 0; 1928 MachineFunction::iterator BBI = CurMBB; 1929 if (++BBI != CurMBB->getParent()->end()) 1930 NextBlock = BBI; 1931 1932 if (NextMBB == NextBlock) 1933 DAG.setRoot(BrAnd); 1934 else 1935 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrAnd, 1936 DAG.getBasicBlock(NextMBB))); 1937 1938 return; 1939} 1940 1941void SelectionDAGLowering::visitInvoke(InvokeInst &I) { 1942 // Retrieve successors. 
1943 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)]; 1944 MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)]; 1945 1946 if (isa<InlineAsm>(I.getCalledValue())) 1947 visitInlineAsm(&I); 1948 else 1949 LowerCallTo(&I, getValue(I.getOperand(0)), false, LandingPad); 1950 1951 // If the value of the invoke is used outside of its defining block, make it 1952 // available as a virtual register. 1953 if (!I.use_empty()) { 1954 DenseMap<const Value*, unsigned>::iterator VMI = FuncInfo.ValueMap.find(&I); 1955 if (VMI != FuncInfo.ValueMap.end()) 1956 CopyValueToVirtualRegister(&I, VMI->second); 1957 } 1958 1959 // Update successor info 1960 CurMBB->addSuccessor(Return); 1961 CurMBB->addSuccessor(LandingPad); 1962 1963 // Drop into normal successor. 1964 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getControlRoot(), 1965 DAG.getBasicBlock(Return))); 1966} 1967 1968void SelectionDAGLowering::visitUnwind(UnwindInst &I) { 1969} 1970 1971/// handleSmallSwitchCaseRange - Emit a series of specific tests (suitable for 1972/// small case ranges). 1973bool SelectionDAGLowering::handleSmallSwitchRange(CaseRec& CR, 1974 CaseRecVector& WorkList, 1975 Value* SV, 1976 MachineBasicBlock* Default) { 1977 Case& BackCase = *(CR.Range.second-1); 1978 1979 // Size is the number of Cases represented by this range. 1980 unsigned Size = CR.Range.second - CR.Range.first; 1981 if (Size > 3) 1982 return false; 1983 1984 // Get the MachineFunction which holds the current MBB. This is used when 1985 // inserting any additional MBBs necessary to represent the switch. 1986 MachineFunction *CurMF = CurMBB->getParent(); 1987 1988 // Figure out which block is immediately after the current one. 
/// handleSmallSwitchRange - Emit a series of specific tests (suitable for
/// small case ranges, at most 3 cases).  Returns false if the range is too
/// big, true once the chain of CaseBlocks has been created/emitted.
bool SelectionDAGLowering::handleSmallSwitchRange(CaseRec& CR,
                                                  CaseRecVector& WorkList,
                                                  Value* SV,
                                                  MachineBasicBlock* Default) {
  Case& BackCase  = *(CR.Range.second-1);

  // Size is the number of Cases represented by this range.
  unsigned Size = CR.Range.second - CR.Range.first;
  if (Size > 3)
    return false;

  // Get the MachineFunction which holds the current MBB.  This is used when
  // inserting any additional MBBs necessary to represent the switch.
  MachineFunction *CurMF = CurMBB->getParent();

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CR.CaseBB;

  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  // TODO: If any two of the cases has the same destination, and if one value
  // is the same as the other, but has one bit unset that the other has set,
  // use bit manipulation to do two compares at once.  For example:
  // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"

  // Rearrange the case blocks so that the last one falls through if possible.
  if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
    // The last case block won't fall through into 'NextBlock' if we emit the
    // branches in this order.  See if rearranging a case value would help.
    for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
      if (I->BB == NextBlock) {
        // Swap the fall-through case into the last position.
        std::swap(*I, BackCase);
        break;
      }
    }
  }

  // Create a CaseBlock record representing a conditional branch to
  // the Case's target mbb if the value being switched on SV is equal
  // to C.
  MachineBasicBlock *CurBlock = CR.CaseBB;
  for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
    MachineBasicBlock *FallThrough;
    if (I != E-1) {
      // Intermediate cases fall through to a fresh block holding the next
      // compare.
      FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
      CurMF->insert(BBI, FallThrough);
    } else {
      // If the last case doesn't match, go to the default block.
      FallThrough = Default;
    }

    Value *RHS, *LHS, *MHS;
    ISD::CondCode CC;
    if (I->High == I->Low) {
      // This is just small small case range :) containing exactly 1 case
      CC = ISD::SETEQ;
      LHS = SV; RHS = I->High; MHS = NULL;
    } else {
      // A true range: encoded as Low <= SV <= High for visitSwitchCase.
      CC = ISD::SETLE;
      LHS = I->Low; MHS = SV; RHS = I->High;
    }
    CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock);

    // If emitting the first comparison, just call visitSwitchCase to emit the
    // code into the current block.  Otherwise, push the CaseBlock onto the
    // vector to be later processed by SDISel, and insert the node's MBB
    // before the next MBB.
    if (CurBlock == CurMBB)
      visitSwitchCase(CB);
    else
      SwitchCases.push_back(CB);

    CurBlock = FallThrough;
  }

  return true;
}
2023 FallThrough = Default; 2024 } 2025 2026 Value *RHS, *LHS, *MHS; 2027 ISD::CondCode CC; 2028 if (I->High == I->Low) { 2029 // This is just small small case range :) containing exactly 1 case 2030 CC = ISD::SETEQ; 2031 LHS = SV; RHS = I->High; MHS = NULL; 2032 } else { 2033 CC = ISD::SETLE; 2034 LHS = I->Low; MHS = SV; RHS = I->High; 2035 } 2036 CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock); 2037 2038 // If emitting the first comparison, just call visitSwitchCase to emit the 2039 // code into the current block. Otherwise, push the CaseBlock onto the 2040 // vector to be later processed by SDISel, and insert the node's MBB 2041 // before the next MBB. 2042 if (CurBlock == CurMBB) 2043 visitSwitchCase(CB); 2044 else 2045 SwitchCases.push_back(CB); 2046 2047 CurBlock = FallThrough; 2048 } 2049 2050 return true; 2051} 2052 2053static inline bool areJTsAllowed(const TargetLowering &TLI) { 2054 return !DisableJumpTables && 2055 (TLI.isOperationLegal(ISD::BR_JT, MVT::Other) || 2056 TLI.isOperationLegal(ISD::BRIND, MVT::Other)); 2057} 2058 2059/// handleJTSwitchCase - Emit jumptable for current switch case range 2060bool SelectionDAGLowering::handleJTSwitchCase(CaseRec& CR, 2061 CaseRecVector& WorkList, 2062 Value* SV, 2063 MachineBasicBlock* Default) { 2064 Case& FrontCase = *CR.Range.first; 2065 Case& BackCase = *(CR.Range.second-1); 2066 2067 int64_t First = cast<ConstantInt>(FrontCase.Low)->getSExtValue(); 2068 int64_t Last = cast<ConstantInt>(BackCase.High)->getSExtValue(); 2069 2070 uint64_t TSize = 0; 2071 for (CaseItr I = CR.Range.first, E = CR.Range.second; 2072 I!=E; ++I) 2073 TSize += I->size(); 2074 2075 if (!areJTsAllowed(TLI) || TSize <= 3) 2076 return false; 2077 2078 double Density = (double)TSize / (double)((Last - First) + 1ULL); 2079 if (Density < 0.4) 2080 return false; 2081 2082 DOUT << "Lowering jump table\n" 2083 << "First entry: " << First << ". Last entry: " << Last << "\n" 2084 << "Size: " << TSize << ". 
Density: " << Density << "\n\n"; 2085 2086 // Get the MachineFunction which holds the current MBB. This is used when 2087 // inserting any additional MBBs necessary to represent the switch. 2088 MachineFunction *CurMF = CurMBB->getParent(); 2089 2090 // Figure out which block is immediately after the current one. 2091 MachineBasicBlock *NextBlock = 0; 2092 MachineFunction::iterator BBI = CR.CaseBB; 2093 2094 if (++BBI != CurMBB->getParent()->end()) 2095 NextBlock = BBI; 2096 2097 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock(); 2098 2099 // Create a new basic block to hold the code for loading the address 2100 // of the jump table, and jumping to it. Update successor information; 2101 // we will either branch to the default case for the switch, or the jump 2102 // table. 2103 MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB); 2104 CurMF->insert(BBI, JumpTableBB); 2105 CR.CaseBB->addSuccessor(Default); 2106 CR.CaseBB->addSuccessor(JumpTableBB); 2107 2108 // Build a vector of destination BBs, corresponding to each target 2109 // of the jump table. If the value of the jump table slot corresponds to 2110 // a case statement, push the case's BB onto the vector, otherwise, push 2111 // the default BB. 2112 std::vector<MachineBasicBlock*> DestBBs; 2113 int64_t TEI = First; 2114 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) { 2115 int64_t Low = cast<ConstantInt>(I->Low)->getSExtValue(); 2116 int64_t High = cast<ConstantInt>(I->High)->getSExtValue(); 2117 2118 if ((Low <= TEI) && (TEI <= High)) { 2119 DestBBs.push_back(I->BB); 2120 if (TEI==High) 2121 ++I; 2122 } else { 2123 DestBBs.push_back(Default); 2124 } 2125 } 2126 2127 // Update successor info. Add one edge to each unique successor. 
2128 BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs()); 2129 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(), 2130 E = DestBBs.end(); I != E; ++I) { 2131 if (!SuccsHandled[(*I)->getNumber()]) { 2132 SuccsHandled[(*I)->getNumber()] = true; 2133 JumpTableBB->addSuccessor(*I); 2134 } 2135 } 2136 2137 // Create a jump table index for this jump table, or return an existing 2138 // one. 2139 unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs); 2140 2141 // Set the jump table information so that we can codegen it as a second 2142 // MachineBasicBlock 2143 JumpTable JT(-1U, JTI, JumpTableBB, Default); 2144 JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB)); 2145 if (CR.CaseBB == CurMBB) 2146 visitJumpTableHeader(JT, JTH); 2147 2148 JTCases.push_back(JumpTableBlock(JTH, JT)); 2149 2150 return true; 2151} 2152 2153/// handleBTSplitSwitchCase - emit comparison and split binary search tree into 2154/// 2 subtrees. 2155bool SelectionDAGLowering::handleBTSplitSwitchCase(CaseRec& CR, 2156 CaseRecVector& WorkList, 2157 Value* SV, 2158 MachineBasicBlock* Default) { 2159 // Get the MachineFunction which holds the current MBB. This is used when 2160 // inserting any additional MBBs necessary to represent the switch. 2161 MachineFunction *CurMF = CurMBB->getParent(); 2162 2163 // Figure out which block is immediately after the current one. 2164 MachineBasicBlock *NextBlock = 0; 2165 MachineFunction::iterator BBI = CR.CaseBB; 2166 2167 if (++BBI != CurMBB->getParent()->end()) 2168 NextBlock = BBI; 2169 2170 Case& FrontCase = *CR.Range.first; 2171 Case& BackCase = *(CR.Range.second-1); 2172 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock(); 2173 2174 // Size is the number of Cases represented by this range. 
2175 unsigned Size = CR.Range.second - CR.Range.first; 2176 2177 int64_t First = cast<ConstantInt>(FrontCase.Low)->getSExtValue(); 2178 int64_t Last = cast<ConstantInt>(BackCase.High)->getSExtValue(); 2179 double FMetric = 0; 2180 CaseItr Pivot = CR.Range.first + Size/2; 2181 2182 // Select optimal pivot, maximizing sum density of LHS and RHS. This will 2183 // (heuristically) allow us to emit JumpTable's later. 2184 uint64_t TSize = 0; 2185 for (CaseItr I = CR.Range.first, E = CR.Range.second; 2186 I!=E; ++I) 2187 TSize += I->size(); 2188 2189 uint64_t LSize = FrontCase.size(); 2190 uint64_t RSize = TSize-LSize; 2191 DOUT << "Selecting best pivot: \n" 2192 << "First: " << First << ", Last: " << Last <<"\n" 2193 << "LSize: " << LSize << ", RSize: " << RSize << "\n"; 2194 for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second; 2195 J!=E; ++I, ++J) { 2196 int64_t LEnd = cast<ConstantInt>(I->High)->getSExtValue(); 2197 int64_t RBegin = cast<ConstantInt>(J->Low)->getSExtValue(); 2198 assert((RBegin-LEnd>=1) && "Invalid case distance"); 2199 double LDensity = (double)LSize / (double)((LEnd - First) + 1ULL); 2200 double RDensity = (double)RSize / (double)((Last - RBegin) + 1ULL); 2201 double Metric = Log2_64(RBegin-LEnd)*(LDensity+RDensity); 2202 // Should always split in some non-trivial place 2203 DOUT <<"=>Step\n" 2204 << "LEnd: " << LEnd << ", RBegin: " << RBegin << "\n" 2205 << "LDensity: " << LDensity << ", RDensity: " << RDensity << "\n" 2206 << "Metric: " << Metric << "\n"; 2207 if (FMetric < Metric) { 2208 Pivot = J; 2209 FMetric = Metric; 2210 DOUT << "Current metric set to: " << FMetric << "\n"; 2211 } 2212 2213 LSize += J->size(); 2214 RSize -= J->size(); 2215 } 2216 if (areJTsAllowed(TLI)) { 2217 // If our case is dense we *really* should handle it earlier! 
2218 assert((FMetric > 0) && "Should handle dense range earlier!"); 2219 } else { 2220 Pivot = CR.Range.first + Size/2; 2221 } 2222 2223 CaseRange LHSR(CR.Range.first, Pivot); 2224 CaseRange RHSR(Pivot, CR.Range.second); 2225 Constant *C = Pivot->Low; 2226 MachineBasicBlock *FalseBB = 0, *TrueBB = 0; 2227 2228 // We know that we branch to the LHS if the Value being switched on is 2229 // less than the Pivot value, C. We use this to optimize our binary 2230 // tree a bit, by recognizing that if SV is greater than or equal to the 2231 // LHS's Case Value, and that Case Value is exactly one less than the 2232 // Pivot's Value, then we can branch directly to the LHS's Target, 2233 // rather than creating a leaf node for it. 2234 if ((LHSR.second - LHSR.first) == 1 && 2235 LHSR.first->High == CR.GE && 2236 cast<ConstantInt>(C)->getSExtValue() == 2237 (cast<ConstantInt>(CR.GE)->getSExtValue() + 1LL)) { 2238 TrueBB = LHSR.first->BB; 2239 } else { 2240 TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB); 2241 CurMF->insert(BBI, TrueBB); 2242 WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR)); 2243 } 2244 2245 // Similar to the optimization above, if the Value being switched on is 2246 // known to be less than the Constant CR.LT, and the current Case Value 2247 // is CR.LT - 1, then we can branch directly to the target block for 2248 // the current Case Value, rather than emitting a RHS leaf node for it. 2249 if ((RHSR.second - RHSR.first) == 1 && CR.LT && 2250 cast<ConstantInt>(RHSR.first->Low)->getSExtValue() == 2251 (cast<ConstantInt>(CR.LT)->getSExtValue() - 1LL)) { 2252 FalseBB = RHSR.first->BB; 2253 } else { 2254 FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB); 2255 CurMF->insert(BBI, FalseBB); 2256 WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR)); 2257 } 2258 2259 // Create a CaseBlock record representing a conditional branch to 2260 // the LHS node if the value being switched on SV is less than C. 2261 // Otherwise, branch to LHS. 
/// handleBitTestsSwitchCase - if current case range has few destination and
/// range span less, than machine word bitwidth, encode case range into series
/// of masks and emit bit tests with these masks.  Returns false when the
/// heuristics reject the range, true once a BitTestBlock has been recorded.
bool SelectionDAGLowering::handleBitTestsSwitchCase(CaseRec& CR,
                                                    CaseRecVector& WorkList,
                                                    Value* SV,
                                                    MachineBasicBlock* Default){
  unsigned IntPtrBits = TLI.getPointerTy().getSizeInBits();

  Case& FrontCase = *CR.Range.first;
  Case& BackCase  = *(CR.Range.second-1);

  // Get the MachineFunction which holds the current MBB.  This is used when
  // inserting any additional MBBs necessary to represent the switch.
  MachineFunction *CurMF = CurMBB->getParent();

  // Count the comparisons this range would otherwise cost.
  unsigned numCmps = 0;
  for (CaseItr I = CR.Range.first, E = CR.Range.second;
       I!=E; ++I) {
    // Single case counts one, case range - two.
    if (I->Low == I->High)
      numCmps +=1;
    else
      numCmps +=2;
  }

  // Count unique destinations
  SmallSet<MachineBasicBlock*, 4> Dests;
  for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
    Dests.insert(I->BB);
    if (Dests.size() > 3)
      // Don't bother the code below, if there are too much unique destinations
      return false;
  }
  DOUT << "Total number of unique destinations: " << Dests.size() << "\n"
       << "Total number of comparisons: " << numCmps << "\n";

  // Compute span of values.
  Constant* minValue = FrontCase.Low;
  Constant* maxValue = BackCase.High;
  uint64_t range = cast<ConstantInt>(maxValue)->getSExtValue() -
                   cast<ConstantInt>(minValue)->getSExtValue();
  DOUT << "Compare range: " << range << "\n"
       << "Low bound: " << cast<ConstantInt>(minValue)->getSExtValue() << "\n"
       << "High bound: " << cast<ConstantInt>(maxValue)->getSExtValue() << "\n";

  // Reject when the span doesn't fit in a machine word, or when too few
  // comparisons would be saved for the number of destinations.
  if (range>=IntPtrBits ||
      (!(Dests.size() == 1 && numCmps >= 3) &&
       !(Dests.size() == 2 && numCmps >= 5) &&
       !(Dests.size() >= 3 && numCmps >= 6)))
    return false;

  DOUT << "Emitting bit tests\n";
  int64_t lowBound = 0;

  // Optimize the case where all the case values fit in a
  // word without having to subtract minValue.  In this case,
  // we can optimize away the subtraction.
  if (cast<ConstantInt>(minValue)->getSExtValue() >= 0 &&
      cast<ConstantInt>(maxValue)->getSExtValue() <  IntPtrBits) {
    range = cast<ConstantInt>(maxValue)->getSExtValue();
  } else {
    lowBound = cast<ConstantInt>(minValue)->getSExtValue();
  }

  // Group cases by destination, accumulating one bit mask per destination.
  CaseBitsVector CasesBits;
  unsigned i, count = 0;

  for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
    MachineBasicBlock* Dest = I->BB;
    // Find (or create) the mask entry for this destination.
    for (i = 0; i < count; ++i)
      if (Dest == CasesBits[i].BB)
        break;

    if (i == count) {
      assert((count < 3) && "Too much destinations to test!");
      CasesBits.push_back(CaseBits(0, Dest, 0));
      count++;
    }

    // Set one bit per value covered by this case, rebased to lowBound.
    uint64_t lo = cast<ConstantInt>(I->Low)->getSExtValue() - lowBound;
    uint64_t hi = cast<ConstantInt>(I->High)->getSExtValue() - lowBound;

    for (uint64_t j = lo; j <= hi; j++) {
      CasesBits[i].Mask |=  1ULL << j;
      CasesBits[i].Bits++;
    }

  }
  std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());

  BitTestInfo BTC;

  // Figure out which block is immediately after the current one.
  MachineFunction::iterator BBI = CR.CaseBB;
  ++BBI;

  const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();

  // Create one bit-test block per destination mask.
  DOUT << "Cases:\n";
  for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
    DOUT << "Mask: " << CasesBits[i].Mask << ", Bits: " << CasesBits[i].Bits
         << ", BB: " << CasesBits[i].BB << "\n";

    MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
    CurMF->insert(BBI, CaseBB);
    BTC.push_back(BitTestCase(CasesBits[i].Mask,
                              CaseBB,
                              CasesBits[i].BB));
  }

  BitTestBlock BTB(lowBound, range, SV,
                   -1U, (CR.CaseBB == CurMBB),
                   CR.CaseBB, Default, BTC);

  // Emit the header immediately if this range lives in the current block.
  if (CR.CaseBB == CurMBB)
    visitBitTestHeader(BTB);

  BitTestCases.push_back(BTB);

  return true;
}
/// Clusterify - Transform simple list of Cases into list of CaseRange's.
/// Collects one Case per successor, sorts them by value, merges adjacent
/// cases with the same destination into ranges, and returns the number of
/// comparisons the clustered list will need (ranges count double).
unsigned SelectionDAGLowering::Clusterify(CaseVector& Cases,
                                          const SwitchInst& SI) {
  unsigned numCmps = 0;

  // Start with "simple" cases: one single-value Case per switch successor
  // (successor 0 is the default and is skipped).
  for (unsigned i = 1; i < SI.getNumSuccessors(); ++i) {
    MachineBasicBlock *SMBB = FuncInfo.MBBMap[SI.getSuccessor(i)];
    Cases.push_back(Case(SI.getSuccessorValue(i),
                         SI.getSuccessorValue(i),
                         SMBB));
  }
  std::sort(Cases.begin(), Cases.end(), CaseCmp());

  // Merge case into clusters
  if (Cases.size()>=2)
    // Must recompute end() each iteration because it may be
    // invalidated by erase if we hold on to it
    for (CaseItr I=Cases.begin(), J=++(Cases.begin()); J!=Cases.end(); ) {
      int64_t nextValue = cast<ConstantInt>(J->Low)->getSExtValue();
      int64_t currentValue = cast<ConstantInt>(I->High)->getSExtValue();
      MachineBasicBlock* nextBB = J->BB;
      MachineBasicBlock* currentBB = I->BB;

      // If the two neighboring cases go to the same destination, merge them
      // into a single case.
      if ((nextValue-currentValue==1) && (currentBB == nextBB)) {
        // Extend I's range to absorb J, then remove J.
        I->High = J->High;
        J = Cases.erase(J);
      } else {
        I = J++;
      }
    }

  // Count comparisons: each cluster costs one, each true range costs two.
  for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
    if (I->Low != I->High)
      // A range counts double, since it requires two compares.
      ++numCmps;
  }

  return numCmps;
}
2422 if ((nextValue-currentValue==1) && (currentBB == nextBB)) { 2423 I->High = J->High; 2424 J = Cases.erase(J); 2425 } else { 2426 I = J++; 2427 } 2428 } 2429 2430 for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) { 2431 if (I->Low != I->High) 2432 // A range counts double, since it requires two compares. 2433 ++numCmps; 2434 } 2435 2436 return numCmps; 2437} 2438 2439void SelectionDAGLowering::visitSwitch(SwitchInst &SI) { 2440 // Figure out which block is immediately after the current one. 2441 MachineBasicBlock *NextBlock = 0; 2442 MachineFunction::iterator BBI = CurMBB; 2443 2444 MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()]; 2445 2446 // If there is only the default destination, branch to it if it is not the 2447 // next basic block. Otherwise, just fall through. 2448 if (SI.getNumOperands() == 2) { 2449 // Update machine-CFG edges. 2450 2451 // If this is not a fall-through branch, emit the branch. 2452 CurMBB->addSuccessor(Default); 2453 if (Default != NextBlock) 2454 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getControlRoot(), 2455 DAG.getBasicBlock(Default))); 2456 2457 return; 2458 } 2459 2460 // If there are any non-default case statements, create a vector of Cases 2461 // representing each one, and sort the vector so that we can efficiently 2462 // create a binary search tree from them. 2463 CaseVector Cases; 2464 unsigned numCmps = Clusterify(Cases, SI); 2465 DOUT << "Clusterify finished. Total clusters: " << Cases.size() 2466 << ". Total compares: " << numCmps << "\n"; 2467 2468 // Get the Value to be switched on and default basic blocks, which will be 2469 // inserted into CaseBlock records, representing basic blocks in the binary 2470 // search tree. 
2471 Value *SV = SI.getOperand(0); 2472 2473 // Push the initial CaseRec onto the worklist 2474 CaseRecVector WorkList; 2475 WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end()))); 2476 2477 while (!WorkList.empty()) { 2478 // Grab a record representing a case range to process off the worklist 2479 CaseRec CR = WorkList.back(); 2480 WorkList.pop_back(); 2481 2482 if (handleBitTestsSwitchCase(CR, WorkList, SV, Default)) 2483 continue; 2484 2485 // If the range has few cases (two or less) emit a series of specific 2486 // tests. 2487 if (handleSmallSwitchRange(CR, WorkList, SV, Default)) 2488 continue; 2489 2490 // If the switch has more than 5 blocks, and at least 40% dense, and the 2491 // target supports indirect branches, then emit a jump table rather than 2492 // lowering the switch to a binary tree of conditional branches. 2493 if (handleJTSwitchCase(CR, WorkList, SV, Default)) 2494 continue; 2495 2496 // Emit binary tree. We need to pick a pivot, and push left and right ranges 2497 // onto the worklist. Leafs are handled via handleSmallSwitchRange() call. 
      handleBTSplitSwitchCase(CR, WorkList, SV, Default);
  }
}


/// visitSub - Lower a 'sub' instruction.  The special case (-0.0 - X), for
/// both scalars and splat vectors, is turned into an FNEG node; everything
/// else is handled by visitBinary.
void SelectionDAGLowering::visitSub(User &I) {
  // -0.0 - X --> fneg
  const Type *Ty = I.getType();
  if (isa<VectorType>(Ty)) {
    // Vector case: fold only when operand 0 is a constant vector equal to a
    // splat of -0.0 of the element type.
    if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
      const VectorType *DestTy = cast<VectorType>(I.getType());
      const Type *ElTy = DestTy->getElementType();
      if (ElTy->isFloatingPoint()) {
        unsigned VL = DestTy->getNumElements();
        std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy));
        Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
        // Constants are uniqued, so pointer equality is a value comparison.
        if (CV == CNZ) {
          SDValue Op2 = getValue(I.getOperand(1));
          setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
          return;
        }
      }
    }
  }
  if (Ty->isFloatingPoint()) {
    // Scalar FP case: operand 0 must be exactly -0.0 (bit-exact check).
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
      if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
        SDValue Op2 = getValue(I.getOperand(1));
        setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
        return;
      }
  }

  visitBinary(I, Ty->isFPOrFPVector() ? ISD::FSUB : ISD::SUB);
}

/// visitBinary - Lower a generic two-operand instruction to a single DAG
/// node with the given ISD opcode.
void SelectionDAGLowering::visitBinary(User &I, unsigned OpCode) {
  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));

  setValue(&I, DAG.getNode(OpCode, Op1.getValueType(), Op1, Op2));
}

/// visitShift - Lower a shift instruction.  For scalar shifts the shift
/// amount is truncated or any-extended to the target's preferred
/// shift-amount type; vector shift amounts are left untouched.
void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));
  if (!isa<VectorType>(I.getType())) {
    if (TLI.getShiftAmountTy().bitsLT(Op2.getValueType()))
      Op2 = DAG.getNode(ISD::TRUNCATE, TLI.getShiftAmountTy(), Op2);
    else if (TLI.getShiftAmountTy().bitsGT(Op2.getValueType()))
      // ANY_EXTEND is safe: targets only look at the low bits of the amount.
      Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);
  }

  setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
}

/// visitICmp - Lower an integer compare (either an ICmpInst or an icmp
/// constant expression) to a SETCC node, translating the IR predicate to
/// the corresponding ISD condition code.
void SelectionDAGLowering::visitICmp(User &I) {
  ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
  if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
    predicate = IC->getPredicate();
  else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
    predicate = ICmpInst::Predicate(IC->getPredicate());
  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode;
  switch (predicate) {
    case ICmpInst::ICMP_EQ  : Opcode = ISD::SETEQ; break;
    case ICmpInst::ICMP_NE  : Opcode = ISD::SETNE; break;
    case ICmpInst::ICMP_UGT : Opcode = ISD::SETUGT; break;
    case ICmpInst::ICMP_UGE : Opcode = ISD::SETUGE; break;
    case ICmpInst::ICMP_ULT : Opcode = ISD::SETULT; break;
    case ICmpInst::ICMP_ULE : Opcode = ISD::SETULE; break;
    case ICmpInst::ICMP_SGT : Opcode = ISD::SETGT; break;
    case ICmpInst::ICMP_SGE : Opcode = ISD::SETGE; break;
    case ICmpInst::ICMP_SLT : Opcode = ISD::SETLT; break;
    case ICmpInst::ICMP_SLE : Opcode = ISD::SETLE; break;
    default:
      assert(!"Invalid ICmp predicate value");
      Opcode = ISD::SETEQ;
      break;
  }
  setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
}

/// visitFCmp - Lower a floating-point compare (instruction or constant
/// expression) to a SETCC node.  FOC is the ordered condition code used
/// when finite-only FP math is in effect (no NaNs possible); FPC is the
/// fully general (possibly unordered) condition code.
void SelectionDAGLowering::visitFCmp(User &I) {
  FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
  if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
    predicate = FC->getPredicate();
  else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
    predicate = FCmpInst::Predicate(FC->getPredicate());
  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));
  ISD::CondCode Condition, FOC, FPC;
  switch (predicate) {
    case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
    case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
    case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
    case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
    case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
    case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
    case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
    case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
    case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
    case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
    case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
    case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
    case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
    case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
    case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
    case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
    default:
      assert(!"Invalid FCmp predicate value");
      FOC = FPC = ISD::SETFALSE;
      break;
  }
  if (FiniteOnlyFPMath())
    Condition = FOC;
  else
    Condition = FPC;
  setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Condition));
}

void
SelectionDAGLowering::visitVICmp(User &I) {
  // Lower a vector integer compare to a VSETCC node, translating the IR
  // predicate (from the VICmpInst or constant expression) to an ISD
  // condition code.  The result has the same vector type as the operands.
  ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
  if (VICmpInst *IC = dyn_cast<VICmpInst>(&I))
    predicate = IC->getPredicate();
  else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
    predicate = ICmpInst::Predicate(IC->getPredicate());
  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode;
  switch (predicate) {
    case ICmpInst::ICMP_EQ  : Opcode = ISD::SETEQ; break;
    case ICmpInst::ICMP_NE  : Opcode = ISD::SETNE; break;
    case ICmpInst::ICMP_UGT : Opcode = ISD::SETUGT; break;
    case ICmpInst::ICMP_UGE : Opcode = ISD::SETUGE; break;
    case ICmpInst::ICMP_ULT : Opcode = ISD::SETULT; break;
    case ICmpInst::ICMP_ULE : Opcode = ISD::SETULE; break;
    case ICmpInst::ICMP_SGT : Opcode = ISD::SETGT; break;
    case ICmpInst::ICMP_SGE : Opcode = ISD::SETGE; break;
    case ICmpInst::ICMP_SLT : Opcode = ISD::SETLT; break;
    case ICmpInst::ICMP_SLE : Opcode = ISD::SETLE; break;
    default:
      assert(!"Invalid ICmp predicate value");
      Opcode = ISD::SETEQ;
      break;
  }
  setValue(&I, DAG.getVSetCC(Op1.getValueType(), Op1, Op2, Opcode));
}

/// visitVFCmp - Lower a vector floating-point compare to a VSETCC node.
/// FOC is the ordered condition code used under finite-only FP math (no
/// NaNs possible); FPC is the general (possibly unordered) condition code.
void SelectionDAGLowering::visitVFCmp(User &I) {
  FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
  if (VFCmpInst *FC = dyn_cast<VFCmpInst>(&I))
    predicate = FC->getPredicate();
  else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
    predicate = FCmpInst::Predicate(FC->getPredicate());
  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));
  ISD::CondCode Condition, FOC, FPC;
  switch (predicate) {
    case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
    case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
    case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
    case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
    case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
    case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
    case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
    case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
    case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
    case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
    case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
    case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
    case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
    case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
    case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
    case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
    default:
      assert(!"Invalid VFCmp predicate value");
      FOC = FPC = ISD::SETFALSE;
      break;
  }
  if (FiniteOnlyFPMath())
    Condition = FOC;
  else
    Condition = FPC;

  MVT DestVT = TLI.getValueType(I.getType());

  setValue(&I, DAG.getVSetCC(DestVT, Op1, Op2, Condition));
}

/// visitSelect - Lower a select instruction to an ISD::SELECT node.
void SelectionDAGLowering::visitSelect(User &I) {
  SDValue Cond     = getValue(I.getOperand(0));
  SDValue TrueVal  = getValue(I.getOperand(1));
  SDValue FalseVal = getValue(I.getOperand(2));
  setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
                           TrueVal, FalseVal));
}


void SelectionDAGLowering::visitTrunc(User &I) {
  // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
  SDValue N = getValue(I.getOperand(0));
  MVT DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
}

void SelectionDAGLowering::visitZExt(User &I) {
  // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
  // ZExt also can't be a cast to bool for same reason.
  // So, nothing much to do here beyond emitting the extend node.
  SDValue N = getValue(I.getOperand(0));
  MVT DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
}

void SelectionDAGLowering::visitSExt(User &I) {
  // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
  // SExt also can't be a cast to bool for same reason. So, nothing much to do
  SDValue N = getValue(I.getOperand(0));
  MVT DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
}

void SelectionDAGLowering::visitFPTrunc(User &I) {
  // FPTrunc is never a no-op cast, no need to check.
  // The 0 operand indicates a normal (value-preserving-where-possible) round.
  SDValue N = getValue(I.getOperand(0));
  MVT DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N, DAG.getIntPtrConstant(0)));
}

void SelectionDAGLowering::visitFPExt(User &I){
  // FPExt is never a no-op cast, no need to check
  SDValue N = getValue(I.getOperand(0));
  MVT DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
}

void SelectionDAGLowering::visitFPToUI(User &I) {
  // FPToUI is never a no-op cast, no need to check
  SDValue N = getValue(I.getOperand(0));
  MVT DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
}

void SelectionDAGLowering::visitFPToSI(User &I) {
  // FPToSI is never a no-op cast, no need to check
  SDValue N = getValue(I.getOperand(0));
  MVT DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
}

void SelectionDAGLowering::visitUIToFP(User &I) {
  // UIToFP is never a no-op cast, no need to check
  SDValue N = getValue(I.getOperand(0));
  MVT DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
}

void SelectionDAGLowering::visitSIToFP(User &I){
  // SIToFP is never a no-op cast, no need to check
  SDValue N = getValue(I.getOperand(0));
  MVT DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
}

void SelectionDAGLowering::visitPtrToInt(User &I) {
  // What to do depends on the size of the integer and the size of the pointer.
  // We can either truncate, zero extend, or no-op, accordingly.
  SDValue N = getValue(I.getOperand(0));
  MVT SrcVT = N.getValueType();
  MVT DestVT = TLI.getValueType(I.getType());
  SDValue Result;
  if (DestVT.bitsLT(SrcVT))
    Result = DAG.getNode(ISD::TRUNCATE, DestVT, N);
  else
    // Note: ZERO_EXTEND can handle cases where the sizes are equal too
    Result = DAG.getNode(ISD::ZERO_EXTEND, DestVT, N);
  setValue(&I, Result);
}

void SelectionDAGLowering::visitIntToPtr(User &I) {
  // What to do depends on the size of the integer and the size of the pointer.
  // We can either truncate, zero extend, or no-op, accordingly.
  SDValue N = getValue(I.getOperand(0));
  MVT SrcVT = N.getValueType();
  MVT DestVT = TLI.getValueType(I.getType());
  if (DestVT.bitsLT(SrcVT))
    setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
  else
    // Note: ZERO_EXTEND can handle cases where the sizes are equal too
    setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
}

void SelectionDAGLowering::visitBitCast(User &I) {
  SDValue N = getValue(I.getOperand(0));
  MVT DestVT = TLI.getValueType(I.getType());

  // BitCast assures us that source and destination are the same size so this
  // is either a BIT_CONVERT or a no-op.
  if (DestVT != N.getValueType())
    setValue(&I, DAG.getNode(ISD::BIT_CONVERT, DestVT, N)); // convert types
  else
    setValue(&I, N); // noop cast.
}

/// visitInsertElement - Lower an insertelement instruction.  The index is
/// zero-extended to the pointer type before building the node.
void SelectionDAGLowering::visitInsertElement(User &I) {
  SDValue InVec = getValue(I.getOperand(0));
  SDValue InVal = getValue(I.getOperand(1));
  SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
                              getValue(I.getOperand(2)));

  setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT,
                           TLI.getValueType(I.getType()),
                           InVec, InVal, InIdx));
}

/// visitExtractElement - Lower an extractelement instruction; the index is
/// zero-extended to the pointer type, as for insertelement.
void SelectionDAGLowering::visitExtractElement(User &I) {
  SDValue InVec = getValue(I.getOperand(0));
  SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
                              getValue(I.getOperand(1)));
  setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT,
                           TLI.getValueType(I.getType()), InVec, InIdx));
}

/// visitShuffleVector - Lower a shufflevector instruction directly to a
/// VECTOR_SHUFFLE node; the constant mask operand is lowered like any value.
void SelectionDAGLowering::visitShuffleVector(User &I) {
  SDValue V1 = getValue(I.getOperand(0));
  SDValue V2 = getValue(I.getOperand(1));
  SDValue Mask = getValue(I.getOperand(2));

  setValue(&I, DAG.getNode(ISD::VECTOR_SHUFFLE,
                           TLI.getValueType(I.getType()),
                           V1, V2, Mask));
}

/// visitInsertValue - Lower an insertvalue instruction.  Aggregates are
/// represented as multiple SDValue results; this builds the merged value by
/// copying the prefix from the original aggregate, then the inserted
/// value(s), then the suffix, substituting UNDEF for any undef source.
void SelectionDAGLowering::visitInsertValue(InsertValueInst &I) {
  const Value *Op0 = I.getOperand(0);
  const Value *Op1 = I.getOperand(1);
  const Type *AggTy = I.getType();
  const Type *ValTy = Op1->getType();
  bool IntoUndef = isa<UndefValue>(Op0);
  bool FromUndef = isa<UndefValue>(Op1);

  // Flattened position of the inserted member within the aggregate.
  unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
                                            I.idx_begin(), I.idx_end());

  SmallVector<MVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, AggTy, AggValueVTs);
  SmallVector<MVT, 4> ValValueVTs;
  ComputeValueVTs(TLI, ValTy, ValValueVTs);

  unsigned NumAggValues = AggValueVTs.size();
  unsigned NumValValues = ValValueVTs.size();
  SmallVector<SDValue, 4> Values(NumAggValues);

  SDValue Agg = getValue(Op0);
  SDValue Val = getValue(Op1);
  unsigned i = 0;
  // Copy the beginning value(s) from the original aggregate.
  for (; i != LinearIndex; ++i)
    Values[i] = IntoUndef ? DAG.getNode(ISD::UNDEF, AggValueVTs[i]) :
                SDValue(Agg.getNode(), Agg.getResNo() + i);
  // Copy values from the inserted value(s).
  for (; i != LinearIndex + NumValValues; ++i)
    Values[i] = FromUndef ? DAG.getNode(ISD::UNDEF, AggValueVTs[i]) :
                SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
  // Copy remaining value(s) from the original aggregate.
  for (; i != NumAggValues; ++i)
    Values[i] = IntoUndef ? DAG.getNode(ISD::UNDEF, AggValueVTs[i]) :
                SDValue(Agg.getNode(), Agg.getResNo() + i);

  setValue(&I, DAG.getMergeValues(DAG.getVTList(&AggValueVTs[0], NumAggValues),
                                  &Values[0], NumAggValues));
}

/// visitExtractValue - Lower an extractvalue instruction by copying out the
/// run of SDValue results corresponding to the selected member.
void SelectionDAGLowering::visitExtractValue(ExtractValueInst &I) {
  const Value *Op0 = I.getOperand(0);
  const Type *AggTy = Op0->getType();
  const Type *ValTy = I.getType();
  bool OutOfUndef = isa<UndefValue>(Op0);

  // Flattened position of the extracted member within the aggregate.
  unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
                                            I.idx_begin(), I.idx_end());

  SmallVector<MVT, 4> ValValueVTs;
  ComputeValueVTs(TLI, ValTy, ValValueVTs);

  unsigned NumValValues = ValValueVTs.size();
  SmallVector<SDValue, 4> Values(NumValValues);

  SDValue Agg = getValue(Op0);
  // Copy out the selected value(s).
  for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
    Values[i - LinearIndex] =
      OutOfUndef ?
        DAG.getNode(ISD::UNDEF,
                    Agg.getNode()->getValueType(Agg.getResNo() + i)) :
        SDValue(Agg.getNode(), Agg.getResNo() + i);

  setValue(&I, DAG.getMergeValues(DAG.getVTList(&ValValueVTs[0], NumValValues),
                                  &Values[0], NumValValues));
}


/// visitGetElementPtr - Lower a getelementptr instruction by accumulating
/// byte offsets into the base pointer: struct indices add a constant layout
/// offset, array/vector indices add index * element-size (scaled with a SHL
/// when the size is a power of two).
void SelectionDAGLowering::visitGetElementPtr(User &I) {
  SDValue N = getValue(I.getOperand(0));
  const Type *Ty = I.getOperand(0)->getType();

  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      // Struct indices are always constant; use the target layout to find
      // the field's byte offset.
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
        N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                        DAG.getIntPtrConstant(Offset));
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getZExtValue() == 0) continue;
        uint64_t Offs =
            TD->getABITypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                        DAG.getIntPtrConstant(Offs));
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD->getABITypeSize(Ty);
      SDValue IdxN = getValue(Idx);

      // If the index is smaller or larger than intptr_t, truncate or extend
      // it.  GEP indices are signed, hence the sign extension.
      if (IdxN.getValueType().bitsLT(N.getValueType()))
        IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
      else if (IdxN.getValueType().bitsGT(N.getValueType()))
        IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);

      // If this is a multiply by a power of two, turn it into a shl
      // immediately.  This is a very common case.
      if (ElementSize != 1) {
        if (isPowerOf2_64(ElementSize)) {
          unsigned Amt = Log2_64(ElementSize);
          IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
                             DAG.getConstant(Amt, TLI.getShiftAmountTy()));
        } else {
          SDValue Scale = DAG.getIntPtrConstant(ElementSize);
          IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
        }
      }

      N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
    }
  }
  setValue(&I, N);
}

/// visitAlloca - Lower a dynamically-sized alloca to a DYNAMIC_STACKALLOC
/// node.  Statically-sized entry-block allocas are already in
/// StaticAllocaMap and need no code here.
void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
  // If this is a fixed sized alloca in the entry block of the function,
  // allocate it statically on the stack.
  if (FuncInfo.StaticAllocaMap.count(&I))
    return;   // getValue will auto-populate this.

  const Type *Ty = I.getAllocatedType();
  uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
  unsigned Align =
    std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
             I.getAlignment());

  // Total size = element size * array-size operand, computed in the
  // pointer-sized integer type.
  SDValue AllocSize = getValue(I.getArraySize());
  MVT IntPtr = TLI.getPointerTy();
  if (IntPtr.bitsLT(AllocSize.getValueType()))
    AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
  else if (IntPtr.bitsGT(AllocSize.getValueType()))
    AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);

  AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
                          DAG.getIntPtrConstant(TySize));

  // Handle alignment.  If the requested alignment is less than or equal to
  // the stack alignment, ignore it.  If the size is greater than or equal to
  // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
  unsigned StackAlign =
    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
  if (Align <= StackAlign)
    Align = 0;

  // Round the size of the allocation up to the stack alignment size
  // by adding SA-1 to the size.
  AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
                          DAG.getIntPtrConstant(StackAlign-1));
  // Mask out the low bits for alignment purposes.
  AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
                          DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));

  SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
  const MVT *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(),
                                         MVT::Other);
  SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, 2, Ops, 3);
  setValue(&I, DSA);
  // The second result is the new chain; stack allocation must be ordered
  // with other side effects.
  DAG.setRoot(DSA.getValue(1));

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
}

/// visitLoad - Lower a load instruction.  First-class aggregate loads are
/// split into one load per flattened member (per ComputeValueVTs) and
/// re-merged with getMergeValues.
void SelectionDAGLowering::visitLoad(LoadInst &I) {
  const Value *SV = I.getOperand(0);
  SDValue Ptr = getValue(SV);

  const Type *Ty = I.getType();
  bool isVolatile = I.isVolatile();
  unsigned Alignment = I.getAlignment();

  SmallVector<MVT, 4> ValueVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
  unsigned NumValues = ValueVTs.size();
  // Zero-sized (empty aggregate) loads produce no code.
  if (NumValues == 0)
    return;

  // Choose the chain the loads hang off of, which determines how strongly
  // they are ordered with other memory operations.
  SDValue Root;
  bool ConstantMemory = false;
  if (I.isVolatile())
    // Serialize volatile loads with other side effects.
    Root = getRoot();
  else if (AA->pointsToConstantMemory(SV)) {
    // Do not serialize (non-volatile) loads of constant memory with anything.
    Root = DAG.getEntryNode();
    ConstantMemory = true;
  } else {
    // Do not serialize non-volatile loads against each other.
    Root = DAG.getRoot();
  }

  SmallVector<SDValue, 4> Values(NumValues);
  SmallVector<SDValue, 4> Chains(NumValues);
  MVT PtrVT = Ptr.getValueType();
  for (unsigned i = 0; i != NumValues; ++i) {
    SDValue L = DAG.getLoad(ValueVTs[i], Root,
                            DAG.getNode(ISD::ADD, PtrVT, Ptr,
                                        DAG.getConstant(Offsets[i], PtrVT)),
                            SV, Offsets[i],
                            isVolatile, Alignment);
    Values[i] = L;
    Chains[i] = L.getValue(1);
  }

  if (!ConstantMemory) {
    // Tie the per-member load chains together; volatile loads pin the root,
    // non-volatile ones just join the pending-loads set.
    SDValue Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                                &Chains[0], NumValues);
    if (isVolatile)
      DAG.setRoot(Chain);
    else
      PendingLoads.push_back(Chain);
  }

  setValue(&I, DAG.getMergeValues(DAG.getVTList(&ValueVTs[0], NumValues),
                                  &Values[0], NumValues));
}


/// visitStore - Lower a store instruction.  Like visitLoad, first-class
/// aggregate stores are split into one store per flattened member.
void SelectionDAGLowering::visitStore(StoreInst &I) {
  Value *SrcV = I.getOperand(0);
  Value *PtrV = I.getOperand(1);

  SmallVector<MVT, 4> ValueVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0)
    return;

  // Get the lowered operands.  Note that we do this after
  // checking if NumResults is zero, because with zero results
  // the operands won't have values in the map.
  SDValue Src = getValue(SrcV);
  SDValue Ptr = getValue(PtrV);

  SDValue Root = getRoot();
  SmallVector<SDValue, 4> Chains(NumValues);
  MVT PtrVT = Ptr.getValueType();
  bool isVolatile = I.isVolatile();
  unsigned Alignment = I.getAlignment();
  // Emit one store per flattened member at its byte offset from Ptr.
  for (unsigned i = 0; i != NumValues; ++i)
    Chains[i] = DAG.getStore(Root, SDValue(Src.getNode(), Src.getResNo() + i),
                             DAG.getNode(ISD::ADD, PtrVT, Ptr,
                                         DAG.getConstant(Offsets[i], PtrVT)),
                             PtrV, Offsets[i],
                             isVolatile, Alignment);

  DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, &Chains[0], NumValues));
}

/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.  The node kind (WO_CHAIN / W_CHAIN / VOID) is chosen from the
/// call's memory behavior and whether it produces a value.
void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
                                                unsigned Intrinsic) {
  bool HasChain = !I.doesNotAccessMemory();
  bool OnlyLoad = HasChain && I.onlyReadsMemory();

  // Build the operand list.
  SmallVector<SDValue, 8> Ops;
  if (HasChain) {  // If this intrinsic has side-effects, chainify it.
    if (OnlyLoad) {
      // We don't need to serialize loads against other loads.
      Ops.push_back(DAG.getRoot());
    } else {
      Ops.push_back(getRoot());
    }
  }

  // Add the intrinsic ID as an integer operand.
  Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));

  // Add all operands of the call to the operand list.
  // Operand 0 is the callee, so the arguments start at 1.
  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
    SDValue Op = getValue(I.getOperand(i));
    assert(TLI.isTypeLegal(Op.getValueType()) &&
           "Intrinsic uses a non-legal type?");
    Ops.push_back(Op);
  }

  std::vector<MVT> VTs;
  if (I.getType() != Type::VoidTy) {
    MVT VT = TLI.getValueType(I.getType());
    if (VT.isVector()) {
      const VectorType *DestTy = cast<VectorType>(I.getType());
      MVT EltVT = TLI.getValueType(DestTy->getElementType());

      VT = MVT::getVectorVT(EltVT, DestTy->getNumElements());
      assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
    }

    assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
    VTs.push_back(VT);
  }
  if (HasChain)
    VTs.push_back(MVT::Other);

  const MVT *VTList = DAG.getNodeValueTypes(VTs);

  // Create the node.
  SDValue Result;
  if (!HasChain)
    Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTList, VTs.size(),
                         &Ops[0], Ops.size());
  else if (I.getType() != Type::VoidTy)
    Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTList, VTs.size(),
                         &Ops[0], Ops.size());
  else
    Result = DAG.getNode(ISD::INTRINSIC_VOID, VTList, VTs.size(),
                         &Ops[0], Ops.size());

  if (HasChain) {
    // The chain is always the last result of a chained intrinsic node.
    SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
    if (OnlyLoad)
      PendingLoads.push_back(Chain);
    else
      DAG.setRoot(Chain);
  }
  if (I.getType() != Type::VoidTy) {
    if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
      MVT VT = TLI.getValueType(PTy);
      Result = DAG.getNode(ISD::BIT_CONVERT, VT, Result);
    }
    setValue(&I, Result);
  }
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
static GlobalVariable *ExtractTypeInfo (Value *V) {
  V = V->stripPointerCasts();
  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
  // A null typeinfo (catch-all) is represented by a null pointer constant.
  assert ((GV || isa<ConstantPointerNull>(V)) &&
          "TypeInfo must be a global variable or NULL");
  return GV;
}

/// addCatchInfo - Extract the personality and type infos from an eh.selector
/// call, and add them to the specified machine basic block.
static void addCatchInfo(CallInst &I, MachineModuleInfo *MMI,
                         MachineBasicBlock *MBB) {
  // Inform the MachineModuleInfo of the personality for this landing pad.
  ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
  assert(CE->getOpcode() == Instruction::BitCast &&
         isa<Function>(CE->getOperand(0)) &&
         "Personality should be a function");
  MMI->addPersonality(MBB, cast<Function>(CE->getOperand(0)));

  // Gather all the type infos for this landing pad and pass them along to
  // MachineModuleInfo.  Operands after the personality alternate between
  // filter-length markers (ConstantInt) and catch typeinfos, scanned from
  // the end of the operand list backwards.
  std::vector<GlobalVariable *> TyInfo;
  unsigned N = I.getNumOperands();

  for (unsigned i = N - 1; i > 2; --i) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
      unsigned FilterLength = CI->getZExtValue();
      // A zero length marks a cleanup; the "+ !FilterLength" skips the
      // marker itself in that case.
      unsigned FirstCatch = i + FilterLength + !FilterLength;
      assert (FirstCatch <= N && "Invalid filter length");

      if (FirstCatch < N) {
        // Everything after the filter entries is catch typeinfos.
        TyInfo.reserve(N - FirstCatch);
        for (unsigned j = FirstCatch; j < N; ++j)
          TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
        MMI->addCatchTypeInfo(MBB, TyInfo);
        TyInfo.clear();
      }

      if (!FilterLength) {
        // Cleanup.
        MMI->addCleanup(MBB);
      } else {
        // Filter.
        TyInfo.reserve(FilterLength - 1);
        for (unsigned j = i + 1; j < FirstCatch; ++j)
          TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
        MMI->addFilterTypeInfo(MBB, TyInfo);
        TyInfo.clear();
      }

      N = i;
    }
  }

  // Any remaining operands (none were filter/cleanup markers) are catches.
  if (N > 3) {
    TyInfo.reserve(N - 3);
    for (unsigned j = 3; j < N; ++j)
      TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
    MMI->addCatchTypeInfo(MBB, TyInfo);
  }
}


/// implVisitBinaryAtomic - Inlined utility function to implement binary
/// input atomic intrinsics for visitIntrinsicCall: I is a call instruction,
/// Op is the associated NodeType for I.  Always lowers in place and chains
/// the result, so it returns null (no external function name).
const char *
SelectionDAGLowering::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
  SDValue Root = getRoot();
  SDValue L = DAG.getAtomic(Op, Root,
                            getValue(I.getOperand(1)),
                            getValue(I.getOperand(2)),
                            I.getOperand(1));
  setValue(&I, L);
  // Result value 1 is the output chain of the atomic operation.
  DAG.setRoot(L.getValue(1));
  return 0;
}

/// visitIntrinsicCall - Lower the call to the specified intrinsic function.  If
/// we want to emit this as a call to a named external function, return the name
/// otherwise lower it and return null.
const char *
SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
  switch (Intrinsic) {
  default:
    // By default, turn this into a target intrinsic node.
3259 visitTargetIntrinsic(I, Intrinsic); 3260 return 0; 3261 case Intrinsic::vastart: visitVAStart(I); return 0; 3262 case Intrinsic::vaend: visitVAEnd(I); return 0; 3263 case Intrinsic::vacopy: visitVACopy(I); return 0; 3264 case Intrinsic::returnaddress: 3265 setValue(&I, DAG.getNode(ISD::RETURNADDR, TLI.getPointerTy(), 3266 getValue(I.getOperand(1)))); 3267 return 0; 3268 case Intrinsic::frameaddress: 3269 setValue(&I, DAG.getNode(ISD::FRAMEADDR, TLI.getPointerTy(), 3270 getValue(I.getOperand(1)))); 3271 return 0; 3272 case Intrinsic::setjmp: 3273 return "_setjmp"+!TLI.usesUnderscoreSetJmp(); 3274 break; 3275 case Intrinsic::longjmp: 3276 return "_longjmp"+!TLI.usesUnderscoreLongJmp(); 3277 break; 3278 case Intrinsic::memcpy_i32: 3279 case Intrinsic::memcpy_i64: { 3280 SDValue Op1 = getValue(I.getOperand(1)); 3281 SDValue Op2 = getValue(I.getOperand(2)); 3282 SDValue Op3 = getValue(I.getOperand(3)); 3283 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue(); 3284 DAG.setRoot(DAG.getMemcpy(getRoot(), Op1, Op2, Op3, Align, false, 3285 I.getOperand(1), 0, I.getOperand(2), 0)); 3286 return 0; 3287 } 3288 case Intrinsic::memset_i32: 3289 case Intrinsic::memset_i64: { 3290 SDValue Op1 = getValue(I.getOperand(1)); 3291 SDValue Op2 = getValue(I.getOperand(2)); 3292 SDValue Op3 = getValue(I.getOperand(3)); 3293 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue(); 3294 DAG.setRoot(DAG.getMemset(getRoot(), Op1, Op2, Op3, Align, 3295 I.getOperand(1), 0)); 3296 return 0; 3297 } 3298 case Intrinsic::memmove_i32: 3299 case Intrinsic::memmove_i64: { 3300 SDValue Op1 = getValue(I.getOperand(1)); 3301 SDValue Op2 = getValue(I.getOperand(2)); 3302 SDValue Op3 = getValue(I.getOperand(3)); 3303 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue(); 3304 3305 // If the source and destination are known to not be aliases, we can 3306 // lower memmove as memcpy. 
3307 uint64_t Size = -1ULL; 3308 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3)) 3309 Size = C->getValue(); 3310 if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) == 3311 AliasAnalysis::NoAlias) { 3312 DAG.setRoot(DAG.getMemcpy(getRoot(), Op1, Op2, Op3, Align, false, 3313 I.getOperand(1), 0, I.getOperand(2), 0)); 3314 return 0; 3315 } 3316 3317 DAG.setRoot(DAG.getMemmove(getRoot(), Op1, Op2, Op3, Align, 3318 I.getOperand(1), 0, I.getOperand(2), 0)); 3319 return 0; 3320 } 3321 case Intrinsic::dbg_stoppoint: { 3322 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 3323 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I); 3324 if (MMI && SPI.getContext() && MMI->Verify(SPI.getContext())) { 3325 DebugInfoDesc *DD = MMI->getDescFor(SPI.getContext()); 3326 assert(DD && "Not a debug information descriptor"); 3327 DAG.setRoot(DAG.getDbgStopPoint(getRoot(), 3328 SPI.getLine(), 3329 SPI.getColumn(), 3330 cast<CompileUnitDesc>(DD))); 3331 } 3332 3333 return 0; 3334 } 3335 case Intrinsic::dbg_region_start: { 3336 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 3337 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I); 3338 if (MMI && RSI.getContext() && MMI->Verify(RSI.getContext())) { 3339 unsigned LabelID = MMI->RecordRegionStart(RSI.getContext()); 3340 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getRoot(), LabelID)); 3341 } 3342 3343 return 0; 3344 } 3345 case Intrinsic::dbg_region_end: { 3346 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 3347 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I); 3348 if (MMI && REI.getContext() && MMI->Verify(REI.getContext())) { 3349 unsigned LabelID = MMI->RecordRegionEnd(REI.getContext()); 3350 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getRoot(), LabelID)); 3351 } 3352 3353 return 0; 3354 } 3355 case Intrinsic::dbg_func_start: { 3356 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 3357 if (!MMI) return 0; 3358 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I); 3359 Value *SP = FSI.getSubprogram(); 3360 if 
(SP && MMI->Verify(SP)) { 3361 // llvm.dbg.func.start implicitly defines a dbg_stoppoint which is 3362 // what (most?) gdb expects. 3363 DebugInfoDesc *DD = MMI->getDescFor(SP); 3364 assert(DD && "Not a debug information descriptor"); 3365 SubprogramDesc *Subprogram = cast<SubprogramDesc>(DD); 3366 const CompileUnitDesc *CompileUnit = Subprogram->getFile(); 3367 unsigned SrcFile = MMI->RecordSource(CompileUnit); 3368 // Record the source line but does create a label. It will be emitted 3369 // at asm emission time. 3370 MMI->RecordSourceLine(Subprogram->getLine(), 0, SrcFile); 3371 } 3372 3373 return 0; 3374 } 3375 case Intrinsic::dbg_declare: { 3376 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 3377 DbgDeclareInst &DI = cast<DbgDeclareInst>(I); 3378 Value *Variable = DI.getVariable(); 3379 if (MMI && Variable && MMI->Verify(Variable)) 3380 DAG.setRoot(DAG.getNode(ISD::DECLARE, MVT::Other, getRoot(), 3381 getValue(DI.getAddress()), getValue(Variable))); 3382 return 0; 3383 } 3384 3385 case Intrinsic::eh_exception: { 3386 if (!CurMBB->isLandingPad()) { 3387 // FIXME: Mark exception register as live in. Hack for PR1508. 3388 unsigned Reg = TLI.getExceptionAddressRegister(); 3389 if (Reg) CurMBB->addLiveIn(Reg); 3390 } 3391 // Insert the EXCEPTIONADDR instruction. 3392 SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other); 3393 SDValue Ops[1]; 3394 Ops[0] = DAG.getRoot(); 3395 SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, VTs, Ops, 1); 3396 setValue(&I, Op); 3397 DAG.setRoot(Op.getValue(1)); 3398 return 0; 3399 } 3400 3401 case Intrinsic::eh_selector_i32: 3402 case Intrinsic::eh_selector_i64: { 3403 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 3404 MVT VT = (Intrinsic == Intrinsic::eh_selector_i32 ? 
3405 MVT::i32 : MVT::i64); 3406 3407 if (MMI) { 3408 if (CurMBB->isLandingPad()) 3409 addCatchInfo(I, MMI, CurMBB); 3410 else { 3411#ifndef NDEBUG 3412 FuncInfo.CatchInfoLost.insert(&I); 3413#endif 3414 // FIXME: Mark exception selector register as live in. Hack for PR1508. 3415 unsigned Reg = TLI.getExceptionSelectorRegister(); 3416 if (Reg) CurMBB->addLiveIn(Reg); 3417 } 3418 3419 // Insert the EHSELECTION instruction. 3420 SDVTList VTs = DAG.getVTList(VT, MVT::Other); 3421 SDValue Ops[2]; 3422 Ops[0] = getValue(I.getOperand(1)); 3423 Ops[1] = getRoot(); 3424 SDValue Op = DAG.getNode(ISD::EHSELECTION, VTs, Ops, 2); 3425 setValue(&I, Op); 3426 DAG.setRoot(Op.getValue(1)); 3427 } else { 3428 setValue(&I, DAG.getConstant(0, VT)); 3429 } 3430 3431 return 0; 3432 } 3433 3434 case Intrinsic::eh_typeid_for_i32: 3435 case Intrinsic::eh_typeid_for_i64: { 3436 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 3437 MVT VT = (Intrinsic == Intrinsic::eh_typeid_for_i32 ? 3438 MVT::i32 : MVT::i64); 3439 3440 if (MMI) { 3441 // Find the type id for the given typeinfo. 3442 GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1)); 3443 3444 unsigned TypeID = MMI->getTypeIDFor(GV); 3445 setValue(&I, DAG.getConstant(TypeID, VT)); 3446 } else { 3447 // Return something different to eh_selector. 
3448 setValue(&I, DAG.getConstant(1, VT)); 3449 } 3450 3451 return 0; 3452 } 3453 3454 case Intrinsic::eh_return: { 3455 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 3456 3457 if (MMI) { 3458 MMI->setCallsEHReturn(true); 3459 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, 3460 MVT::Other, 3461 getControlRoot(), 3462 getValue(I.getOperand(1)), 3463 getValue(I.getOperand(2)))); 3464 } else { 3465 setValue(&I, DAG.getConstant(0, TLI.getPointerTy())); 3466 } 3467 3468 return 0; 3469 } 3470 3471 case Intrinsic::eh_unwind_init: { 3472 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) { 3473 MMI->setCallsUnwindInit(true); 3474 } 3475 3476 return 0; 3477 } 3478 3479 case Intrinsic::eh_dwarf_cfa: { 3480 MVT VT = getValue(I.getOperand(1)).getValueType(); 3481 SDValue CfaArg; 3482 if (VT.bitsGT(TLI.getPointerTy())) 3483 CfaArg = DAG.getNode(ISD::TRUNCATE, 3484 TLI.getPointerTy(), getValue(I.getOperand(1))); 3485 else 3486 CfaArg = DAG.getNode(ISD::SIGN_EXTEND, 3487 TLI.getPointerTy(), getValue(I.getOperand(1))); 3488 3489 SDValue Offset = DAG.getNode(ISD::ADD, 3490 TLI.getPointerTy(), 3491 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, 3492 TLI.getPointerTy()), 3493 CfaArg); 3494 setValue(&I, DAG.getNode(ISD::ADD, 3495 TLI.getPointerTy(), 3496 DAG.getNode(ISD::FRAMEADDR, 3497 TLI.getPointerTy(), 3498 DAG.getConstant(0, 3499 TLI.getPointerTy())), 3500 Offset)); 3501 return 0; 3502 } 3503 3504 case Intrinsic::sqrt: 3505 setValue(&I, DAG.getNode(ISD::FSQRT, 3506 getValue(I.getOperand(1)).getValueType(), 3507 getValue(I.getOperand(1)))); 3508 return 0; 3509 case Intrinsic::powi: 3510 setValue(&I, DAG.getNode(ISD::FPOWI, 3511 getValue(I.getOperand(1)).getValueType(), 3512 getValue(I.getOperand(1)), 3513 getValue(I.getOperand(2)))); 3514 return 0; 3515 case Intrinsic::sin: 3516 setValue(&I, DAG.getNode(ISD::FSIN, 3517 getValue(I.getOperand(1)).getValueType(), 3518 getValue(I.getOperand(1)))); 3519 return 0; 3520 case Intrinsic::cos: 3521 setValue(&I, DAG.getNode(ISD::FCOS, 3522 
getValue(I.getOperand(1)).getValueType(), 3523 getValue(I.getOperand(1)))); 3524 return 0; 3525 case Intrinsic::pow: 3526 setValue(&I, DAG.getNode(ISD::FPOW, 3527 getValue(I.getOperand(1)).getValueType(), 3528 getValue(I.getOperand(1)), 3529 getValue(I.getOperand(2)))); 3530 return 0; 3531 case Intrinsic::pcmarker: { 3532 SDValue Tmp = getValue(I.getOperand(1)); 3533 DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp)); 3534 return 0; 3535 } 3536 case Intrinsic::readcyclecounter: { 3537 SDValue Op = getRoot(); 3538 SDValue Tmp = DAG.getNode(ISD::READCYCLECOUNTER, 3539 DAG.getNodeValueTypes(MVT::i64, MVT::Other), 2, 3540 &Op, 1); 3541 setValue(&I, Tmp); 3542 DAG.setRoot(Tmp.getValue(1)); 3543 return 0; 3544 } 3545 case Intrinsic::part_select: { 3546 // Currently not implemented: just abort 3547 assert(0 && "part_select intrinsic not implemented"); 3548 abort(); 3549 } 3550 case Intrinsic::part_set: { 3551 // Currently not implemented: just abort 3552 assert(0 && "part_set intrinsic not implemented"); 3553 abort(); 3554 } 3555 case Intrinsic::bswap: 3556 setValue(&I, DAG.getNode(ISD::BSWAP, 3557 getValue(I.getOperand(1)).getValueType(), 3558 getValue(I.getOperand(1)))); 3559 return 0; 3560 case Intrinsic::cttz: { 3561 SDValue Arg = getValue(I.getOperand(1)); 3562 MVT Ty = Arg.getValueType(); 3563 SDValue result = DAG.getNode(ISD::CTTZ, Ty, Arg); 3564 setValue(&I, result); 3565 return 0; 3566 } 3567 case Intrinsic::ctlz: { 3568 SDValue Arg = getValue(I.getOperand(1)); 3569 MVT Ty = Arg.getValueType(); 3570 SDValue result = DAG.getNode(ISD::CTLZ, Ty, Arg); 3571 setValue(&I, result); 3572 return 0; 3573 } 3574 case Intrinsic::ctpop: { 3575 SDValue Arg = getValue(I.getOperand(1)); 3576 MVT Ty = Arg.getValueType(); 3577 SDValue result = DAG.getNode(ISD::CTPOP, Ty, Arg); 3578 setValue(&I, result); 3579 return 0; 3580 } 3581 case Intrinsic::stacksave: { 3582 SDValue Op = getRoot(); 3583 SDValue Tmp = DAG.getNode(ISD::STACKSAVE, 3584 
DAG.getNodeValueTypes(TLI.getPointerTy(), MVT::Other), 2, &Op, 1); 3585 setValue(&I, Tmp); 3586 DAG.setRoot(Tmp.getValue(1)); 3587 return 0; 3588 } 3589 case Intrinsic::stackrestore: { 3590 SDValue Tmp = getValue(I.getOperand(1)); 3591 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp)); 3592 return 0; 3593 } 3594 case Intrinsic::var_annotation: 3595 // Discard annotate attributes 3596 return 0; 3597 3598 case Intrinsic::init_trampoline: { 3599 const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts()); 3600 3601 SDValue Ops[6]; 3602 Ops[0] = getRoot(); 3603 Ops[1] = getValue(I.getOperand(1)); 3604 Ops[2] = getValue(I.getOperand(2)); 3605 Ops[3] = getValue(I.getOperand(3)); 3606 Ops[4] = DAG.getSrcValue(I.getOperand(1)); 3607 Ops[5] = DAG.getSrcValue(F); 3608 3609 SDValue Tmp = DAG.getNode(ISD::TRAMPOLINE, 3610 DAG.getNodeValueTypes(TLI.getPointerTy(), 3611 MVT::Other), 2, 3612 Ops, 6); 3613 3614 setValue(&I, Tmp); 3615 DAG.setRoot(Tmp.getValue(1)); 3616 return 0; 3617 } 3618 3619 case Intrinsic::gcroot: 3620 if (GFI) { 3621 Value *Alloca = I.getOperand(1); 3622 Constant *TypeMap = cast<Constant>(I.getOperand(2)); 3623 3624 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode()); 3625 GFI->addStackRoot(FI->getIndex(), TypeMap); 3626 } 3627 return 0; 3628 3629 case Intrinsic::gcread: 3630 case Intrinsic::gcwrite: 3631 assert(0 && "GC failed to lower gcread/gcwrite intrinsics!"); 3632 return 0; 3633 3634 case Intrinsic::flt_rounds: { 3635 setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, MVT::i32)); 3636 return 0; 3637 } 3638 3639 case Intrinsic::trap: { 3640 DAG.setRoot(DAG.getNode(ISD::TRAP, MVT::Other, getRoot())); 3641 return 0; 3642 } 3643 case Intrinsic::prefetch: { 3644 SDValue Ops[4]; 3645 Ops[0] = getRoot(); 3646 Ops[1] = getValue(I.getOperand(1)); 3647 Ops[2] = getValue(I.getOperand(2)); 3648 Ops[3] = getValue(I.getOperand(3)); 3649 DAG.setRoot(DAG.getNode(ISD::PREFETCH, MVT::Other, &Ops[0], 4)); 3650 return 0; 
3651 } 3652 3653 case Intrinsic::memory_barrier: { 3654 SDValue Ops[6]; 3655 Ops[0] = getRoot(); 3656 for (int x = 1; x < 6; ++x) 3657 Ops[x] = getValue(I.getOperand(x)); 3658 3659 DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, MVT::Other, &Ops[0], 6)); 3660 return 0; 3661 } 3662 case Intrinsic::atomic_cmp_swap: { 3663 SDValue Root = getRoot(); 3664 SDValue L; 3665 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { 3666 case MVT::i8: 3667 L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_8, Root, 3668 getValue(I.getOperand(1)), 3669 getValue(I.getOperand(2)), 3670 getValue(I.getOperand(3)), 3671 I.getOperand(1)); 3672 break; 3673 case MVT::i16: 3674 L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_16, Root, 3675 getValue(I.getOperand(1)), 3676 getValue(I.getOperand(2)), 3677 getValue(I.getOperand(3)), 3678 I.getOperand(1)); 3679 break; 3680 case MVT::i32: 3681 L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_32, Root, 3682 getValue(I.getOperand(1)), 3683 getValue(I.getOperand(2)), 3684 getValue(I.getOperand(3)), 3685 I.getOperand(1)); 3686 break; 3687 case MVT::i64: 3688 L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_64, Root, 3689 getValue(I.getOperand(1)), 3690 getValue(I.getOperand(2)), 3691 getValue(I.getOperand(3)), 3692 I.getOperand(1)); 3693 break; 3694 default: 3695 assert(0 && "Invalid atomic type"); 3696 abort(); 3697 } 3698 setValue(&I, L); 3699 DAG.setRoot(L.getValue(1)); 3700 return 0; 3701 } 3702 case Intrinsic::atomic_load_add: 3703 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { 3704 case MVT::i8: 3705 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_8); 3706 case MVT::i16: 3707 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_16); 3708 case MVT::i32: 3709 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_32); 3710 case MVT::i64: 3711 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_64); 3712 default: 3713 assert(0 && "Invalid atomic type"); 3714 abort(); 3715 } 3716 case Intrinsic::atomic_load_sub: 3717 switch 
(getValue(I.getOperand(2)).getValueType().getSimpleVT()) { 3718 case MVT::i8: 3719 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_8); 3720 case MVT::i16: 3721 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_16); 3722 case MVT::i32: 3723 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_32); 3724 case MVT::i64: 3725 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_64); 3726 default: 3727 assert(0 && "Invalid atomic type"); 3728 abort(); 3729 } 3730 case Intrinsic::atomic_load_or: 3731 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { 3732 case MVT::i8: 3733 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_8); 3734 case MVT::i16: 3735 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_16); 3736 case MVT::i32: 3737 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_32); 3738 case MVT::i64: 3739 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_64); 3740 default: 3741 assert(0 && "Invalid atomic type"); 3742 abort(); 3743 } 3744 case Intrinsic::atomic_load_xor: 3745 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { 3746 case MVT::i8: 3747 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_8); 3748 case MVT::i16: 3749 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_16); 3750 case MVT::i32: 3751 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_32); 3752 case MVT::i64: 3753 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_64); 3754 default: 3755 assert(0 && "Invalid atomic type"); 3756 abort(); 3757 } 3758 case Intrinsic::atomic_load_and: 3759 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { 3760 case MVT::i8: 3761 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_8); 3762 case MVT::i16: 3763 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_16); 3764 case MVT::i32: 3765 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_32); 3766 case MVT::i64: 3767 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_64); 3768 default: 3769 assert(0 && "Invalid atomic type"); 3770 abort(); 
3771 } 3772 case Intrinsic::atomic_load_nand: 3773 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { 3774 case MVT::i8: 3775 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_8); 3776 case MVT::i16: 3777 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_16); 3778 case MVT::i32: 3779 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_32); 3780 case MVT::i64: 3781 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_64); 3782 default: 3783 assert(0 && "Invalid atomic type"); 3784 abort(); 3785 } 3786 case Intrinsic::atomic_load_max: 3787 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { 3788 case MVT::i8: 3789 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_8); 3790 case MVT::i16: 3791 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_16); 3792 case MVT::i32: 3793 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_32); 3794 case MVT::i64: 3795 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_64); 3796 default: 3797 assert(0 && "Invalid atomic type"); 3798 abort(); 3799 } 3800 case Intrinsic::atomic_load_min: 3801 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { 3802 case MVT::i8: 3803 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_8); 3804 case MVT::i16: 3805 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_16); 3806 case MVT::i32: 3807 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_32); 3808 case MVT::i64: 3809 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_64); 3810 default: 3811 assert(0 && "Invalid atomic type"); 3812 abort(); 3813 } 3814 case Intrinsic::atomic_load_umin: 3815 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) { 3816 case MVT::i8: 3817 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_8); 3818 case MVT::i16: 3819 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_16); 3820 case MVT::i32: 3821 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_32); 3822 case MVT::i64: 3823 return implVisitBinaryAtomic(I, 
ISD::ATOMIC_LOAD_UMIN_64);
    default:
      assert(0 && "Invalid atomic type");
      abort();
    }
  case Intrinsic::atomic_load_umax:
    // Dispatch on the width of the value operand to the matching
    // width-specific atomic opcode.
    switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
    case MVT::i8:
      return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_8);
    case MVT::i16:
      return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_16);
    case MVT::i32:
      return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_32);
    case MVT::i64:
      return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_64);
    default:
      assert(0 && "Invalid atomic type");
      abort();
    }
  case Intrinsic::atomic_swap:
    switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
    case MVT::i8:
      return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_8);
    case MVT::i16:
      return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_16);
    case MVT::i32:
      return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_32);
    case MVT::i64:
      return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_64);
    default:
      assert(0 && "Invalid atomic type");
      abort();
    }
  }
}


/// LowerCallTo - Lower an ordinary call or invoke call site to a target
/// call.  Argument attributes (sext/zext/inreg/sret/nest/byval and the
/// parameter alignment) are copied from the call site into the argument
/// list handed to TargetLowering::LowerCallTo.  When LandingPad is
/// non-null (i.e. this is an invoke) and MachineModuleInfo is available,
/// the call is bracketed with EH labels so the try range can be recorded.
void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
                                       bool IsTailCall,
                                       MachineBasicBlock *LandingPad) {
  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
  unsigned BeginLabel = 0, EndLabel = 0;

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Args.reserve(CS.arg_size());
  for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    SDValue ArgNode = getValue(*i);
    Entry.Node = ArgNode; Entry.Ty = (*i)->getType();

    // Parameter attribute indices are 1-based; index 0 holds the return
    // value's attributes (queried below for the LowerCallTo call).
    unsigned attrInd = i - CS.arg_begin() + 1;
    Entry.isSExt = CS.paramHasAttr(attrInd, ParamAttr::SExt);
    Entry.isZExt = CS.paramHasAttr(attrInd, ParamAttr::ZExt);
    Entry.isInReg = CS.paramHasAttr(attrInd, ParamAttr::InReg);
    Entry.isSRet = CS.paramHasAttr(attrInd, ParamAttr::StructRet);
    Entry.isNest = CS.paramHasAttr(attrInd, ParamAttr::Nest);
    Entry.isByVal = CS.paramHasAttr(attrInd, ParamAttr::ByVal);
    Entry.Alignment = CS.getParamAlignment(attrInd);
    Args.push_back(Entry);
  }

  if (LandingPad && MMI) {
    // Insert a label before the invoke call to mark the try range.  This can be
    // used to detect deletion of the invoke via the MachineModuleInfo.
    BeginLabel = MMI->NextLabelID();
    // Both PendingLoads and PendingExports must be flushed here;
    // this call might not return.
    (void)getRoot();
    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getControlRoot(), BeginLabel));
  }

  std::pair<SDValue,SDValue> Result =
    TLI.LowerCallTo(getRoot(), CS.getType(),
                    CS.paramHasAttr(0, ParamAttr::SExt),
                    CS.paramHasAttr(0, ParamAttr::ZExt),
                    FTy->isVarArg(), CS.getCallingConv(), IsTailCall,
                    Callee, Args, DAG);
  if (CS.getType() != Type::VoidTy)
    setValue(CS.getInstruction(), Result.first);
  DAG.setRoot(Result.second);

  if (LandingPad && MMI) {
    // Insert a label at the end of the invoke call to mark the try range.  This
    // can be used to detect deletion of the invoke via the MachineModuleInfo.
    EndLabel = MMI->NextLabelID();
    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getRoot(), EndLabel));

    // Inform MachineModuleInfo of range.
    MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
  }
}


/// visitCall - Lower a call instruction.  Intrinsic calls are dispatched
/// to visitIntrinsicCall, inline asm to visitInlineAsm; everything else
/// (including recognized libm calls) goes through LowerCallTo.
void SelectionDAGLowering::visitCall(CallInst &I) {
  const char *RenameFn = 0;
  if (Function *F = I.getCalledFunction()) {
    if (F->isDeclaration()) {
      if (unsigned IID = F->getIntrinsicID()) {
        // visitIntrinsicCall returns a non-null symbol name when the
        // intrinsic should instead be lowered as a call to that symbol;
        // otherwise it has been fully lowered and we are done.
        RenameFn = visitIntrinsicCall(I, IID);
        if (!RenameFn)
          return;
      }
    }

    // Check for well-known libc/libm calls.
// If the function is internal, it
    // can't be a library call.
    unsigned NameLen = F->getNameLen();
    if (!F->hasInternalLinkage() && NameLen) {
      const char *NameStr = F->getNameStart();
      // copysign/copysignf lower directly to an FCOPYSIGN node.
      if (NameStr[0] == 'c' &&
          ((NameLen == 8 && !strcmp(NameStr, "copysign")) ||
           (NameLen == 9 && !strcmp(NameStr, "copysignf")))) {
        if (I.getNumOperands() == 3 &&   // Basic sanity checks.
            I.getOperand(1)->getType()->isFloatingPoint() &&
            I.getType() == I.getOperand(1)->getType() &&
            I.getType() == I.getOperand(2)->getType()) {
          SDValue LHS = getValue(I.getOperand(1));
          SDValue RHS = getValue(I.getOperand(2));
          setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(),
                                   LHS, RHS));
          return;
        }
      } else if (NameStr[0] == 'f' &&
                 ((NameLen == 4 && !strcmp(NameStr, "fabs")) ||
                  (NameLen == 5 && !strcmp(NameStr, "fabsf")) ||
                  (NameLen == 5 && !strcmp(NameStr, "fabsl")))) {
        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
            I.getOperand(1)->getType()->isFloatingPoint() &&
            I.getType() == I.getOperand(1)->getType()) {
          SDValue Tmp = getValue(I.getOperand(1));
          setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
          return;
        }
      } else if (NameStr[0] == 's' &&
                 ((NameLen == 3 && !strcmp(NameStr, "sin")) ||
                  (NameLen == 4 && !strcmp(NameStr, "sinf")) ||
                  (NameLen == 4 && !strcmp(NameStr, "sinl")))) {
        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
            I.getOperand(1)->getType()->isFloatingPoint() &&
            I.getType() == I.getOperand(1)->getType()) {
          SDValue Tmp = getValue(I.getOperand(1));
          setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
          return;
        }
      } else if (NameStr[0] == 'c' &&
                 ((NameLen == 3 && !strcmp(NameStr, "cos")) ||
                  (NameLen == 4 && !strcmp(NameStr, "cosf")) ||
                  (NameLen == 4 && !strcmp(NameStr, "cosl")))) {
        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
            I.getOperand(1)->getType()->isFloatingPoint() &&
            I.getType() == I.getOperand(1)->getType()) {
          SDValue Tmp = getValue(I.getOperand(1));
          setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
          return;
        }
      }
    }
  } else if (isa<InlineAsm>(I.getOperand(0))) {
    // Calls through an inline asm value are lowered separately.
    visitInlineAsm(&I);
    return;
  }

  SDValue Callee;
  if (!RenameFn)
    Callee = getValue(I.getOperand(0));
  else
    // An intrinsic asked to be lowered as a call to this renamed symbol.
    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());

  LowerCallTo(&I, Callee, I.isTailCall());
}


/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
/// this value and returns the result as a ValueVT value.  This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      SDValue &Chain,
                                      SDValue *Flag) const {
  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    MVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = TLI->getNumRegisters(ValueVT);
    MVT RegisterVT = RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (Flag == 0)
        P = DAG.getCopyFromReg(Chain, Regs[Part+i], RegisterVT);
      else {
        P = DAG.getCopyFromReg(Chain, Regs[Part+i], RegisterVT, *Flag);
        // Result #2 of a flagged CopyFromReg is the output flag; thread it
        // into the next copy so they stay ordered.
        *Flag = P.getValue(2);
      }
      // Result #1 is the output chain.
      Chain = P.getValue(1);

      // If the source register was virtual and if we know something about it,
      // add an assert node.
if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
          RegisterVT.isInteger() && !RegisterVT.isVector()) {
        unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
        FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
        // LiveOutRegInfo is indexed by virtual-register slot; only consult
        // it when an entry was actually recorded for this vreg.
        if (FLI.LiveOutRegInfo.size() > SlotNo) {
          FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];

          unsigned RegSize = RegisterVT.getSizeInBits();
          unsigned NumSignBits = LOI.NumSignBits;
          unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();

          // FIXME: We capture more information than the dag can represent.  For
          // now, just use the tightest assertzext/assertsext possible.
          bool isSExt = true;
          MVT FromVT(MVT::Other);
          if (NumSignBits == RegSize)
            isSExt = true, FromVT = MVT::i1;   // ASSERT SEXT 1
          else if (NumZeroBits >= RegSize-1)
            isSExt = false, FromVT = MVT::i1;  // ASSERT ZEXT 1
          else if (NumSignBits > RegSize-8)
            isSExt = true, FromVT = MVT::i8;   // ASSERT SEXT 8
          else if (NumZeroBits >= RegSize-9)
            isSExt = false, FromVT = MVT::i8;  // ASSERT ZEXT 8
          else if (NumSignBits > RegSize-16)
            isSExt = true, FromVT = MVT::i16;  // ASSERT SEXT 16
          else if (NumZeroBits >= RegSize-17)
            isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
          else if (NumSignBits > RegSize-32)
            isSExt = true, FromVT = MVT::i32;  // ASSERT SEXT 32
          else if (NumZeroBits >= RegSize-33)
            isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32

          if (FromVT != MVT::Other) {
            // Wrap the copied value in an assertion about its extension,
            // so later combines can exploit the known bits.
            P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext,
                            RegisterVT, P, DAG.getValueType(FromVT));

          }
        }
      }

      Parts[i] = P;
    }

    Values[Value] = getCopyFromParts(DAG, Parts.begin(), NumRegs, RegisterVT,
                                     ValueVT);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getMergeValues(DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
                            &Values[0], ValueVTs.size());
}

/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
/// specified value into the registers specified by this object.  This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 SDValue &Chain, SDValue *Flag) const {
  // Get the list of the values's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    MVT ValueVT = ValueVTs[Value];
    unsigned NumParts = TLI->getNumRegisters(ValueVT);
    MVT RegisterVT = RegVTs[Value];

    getCopyToParts(DAG, Val.getValue(Val.getResNo() + Value),
                   &Parts[Part], NumParts, RegisterVT);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (Flag == 0)
      Part = DAG.getCopyToReg(Chain, Regs[i], Parts[i]);
    else {
      Part = DAG.getCopyToReg(Chain, Regs[i], Parts[i], *Flag);
      // Result #1 of a flagged CopyToReg is the output flag; thread it
      // into the next copy.
      *Flag = Part.getValue(1);
    }
    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it.  That is the CopyToReg nodes and the user are considered
    // a single scheduling unit.
// If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    //   c1, f1 = CopyToReg
    //   c2, f2 = CopyToReg
    //   c3     = TokenFactor c1, c2
    //   ...
    //          = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Chains[0], NumRegs);
}

/// AddInlineAsmOperands - Add this value to the specified inlineasm node
/// operand list.  This adds the code marker and includes the number of
/// values added into it.
void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  MVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
  // Pack the operand code and the register count into a single immediate:
  // the code occupies the low 3 bits, the count the bits above them.
  Ops.push_back(DAG.getTargetConstant(Code | (Regs.size() << 3), IntPtrTy));
  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI->getNumRegisters(ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i)
      Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
  }
}

/// isAllocatableRegister - If the specified register is safe to allocate,
/// i.e. it isn't a stack pointer or some other special register, return the
/// register class for the register.  Otherwise, return null.
static const TargetRegisterClass *
isAllocatableRegister(unsigned Reg, MachineFunction &MF,
                      const TargetLowering &TLI,
                      const TargetRegisterInfo *TRI) {
  MVT FoundVT = MVT::Other;
  const TargetRegisterClass *FoundRC = 0;
  for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
       E = TRI->regclass_end(); RCI != E; ++RCI) {
    MVT ThisVT = MVT::Other;

    const TargetRegisterClass *RC = *RCI;
    // If none of the the value types for this register class are valid, we
    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
         I != E; ++I) {
      if (TLI.isTypeLegal(*I)) {
        // If we have already found this register in a different register class,
        // choose the one with the largest VT specified.  For example, on
        // PowerPC, we favor f64 register classes over f32.
        if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
          ThisVT = *I;
          break;
        }
      }
    }

    if (ThisVT == MVT::Other) continue;

    // NOTE: This isn't ideal.  In particular, this might allocate the
    // frame pointer in functions that need it (due to them not being taken
    // out of allocation, because a variable sized allocation hasn't been seen
    // yet).  This is a slight code pessimization, but should still work.
    for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
         E = RC->allocation_order_end(MF); I != E; ++I)
      if (*I == Reg) {
        // We found a matching register class.  Keep looking at others in case
        // we find one with larger registers that this physreg is also in.
        FoundRC = RC;
        FoundVT = ThisVT;
        break;
      }
  }
  return FoundRC;
}


namespace {
/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
struct SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
  /// CallOperand - If this is the result output operand or a clobber
  /// this is null, otherwise it is the incoming operand to the CallInst.
  /// This gets modified as the asm is processed.
  SDValue CallOperand;

  /// AssignedRegs - If this is a register or register class operand, this
  /// contains the set of register corresponding to the operand.
  RegsForValue AssignedRegs;

  explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
    : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
  }

  /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
  /// busy in OutputRegs/InputRegs.
  void MarkAllocatedRegs(bool isOutReg, bool isInReg,
                         std::set<unsigned> &OutputRegs,
                         std::set<unsigned> &InputRegs,
                         const TargetRegisterInfo &TRI) const {
    if (isOutReg) {
      for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
        MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
    }
    if (isInReg) {
      for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
        MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
    }
  }

private:
  /// MarkRegAndAliases - Mark the specified register and all aliases in the
  /// specified set.  Marking the aliases too prevents a later operand from
  /// grabbing an overlapping physical register.
  static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
                                const TargetRegisterInfo &TRI) {
    assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
    Regs.insert(Reg);
    if (const unsigned *Aliases = TRI.getAliasSet(Reg))
      for (; *Aliases; ++Aliases)
        Regs.insert(*Aliases);
  }
};
} // end anon namespace.


/// GetRegistersForValue - Assign registers (virtual or physical) for the
/// specified operand.  We prefer to assign virtual registers, to allow the
/// register allocator handle the assignment process.  However, if the asm uses
/// features that we can't model on machineinstrs, we have SDISel do the
/// allocation.  This produces generally horrible, but correct, code.
///
///   OpInfo describes the operand.
///   HasEarlyClobber is true if there are any early clobber constraints (=&r)
///   or any explicitly clobbered registers.
///   Input and OutputRegs are the set of already allocated physical registers.
4250/// 4251void SelectionDAGLowering:: 4252GetRegistersForValue(SDISelAsmOperandInfo &OpInfo, bool HasEarlyClobber, 4253 std::set<unsigned> &OutputRegs, 4254 std::set<unsigned> &InputRegs) { 4255 // Compute whether this value requires an input register, an output register, 4256 // or both. 4257 bool isOutReg = false; 4258 bool isInReg = false; 4259 switch (OpInfo.Type) { 4260 case InlineAsm::isOutput: 4261 isOutReg = true; 4262 4263 // If this is an early-clobber output, or if there is an input 4264 // constraint that matches this, we need to reserve the input register 4265 // so no other inputs allocate to it. 4266 isInReg = OpInfo.isEarlyClobber || OpInfo.hasMatchingInput; 4267 break; 4268 case InlineAsm::isInput: 4269 isInReg = true; 4270 isOutReg = false; 4271 break; 4272 case InlineAsm::isClobber: 4273 isOutReg = true; 4274 isInReg = true; 4275 break; 4276 } 4277 4278 4279 MachineFunction &MF = DAG.getMachineFunction(); 4280 SmallVector<unsigned, 4> Regs; 4281 4282 // If this is a constraint for a single physreg, or a constraint for a 4283 // register class, find it. 4284 std::pair<unsigned, const TargetRegisterClass*> PhysReg = 4285 TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode, 4286 OpInfo.ConstraintVT); 4287 4288 unsigned NumRegs = 1; 4289 if (OpInfo.ConstraintVT != MVT::Other) 4290 NumRegs = TLI.getNumRegisters(OpInfo.ConstraintVT); 4291 MVT RegVT; 4292 MVT ValueVT = OpInfo.ConstraintVT; 4293 4294 4295 // If this is a constraint for a specific physical register, like {r17}, 4296 // assign it now. 4297 if (PhysReg.first) { 4298 if (OpInfo.ConstraintVT == MVT::Other) 4299 ValueVT = *PhysReg.second->vt_begin(); 4300 4301 // Get the actual register value type. This is important, because the user 4302 // may have asked for (e.g.) the AX register in i32 type. We need to 4303 // remember that AX is actually i16 to get the right extension. 4304 RegVT = *PhysReg.second->vt_begin(); 4305 4306 // This is a explicit reference to a physical register. 
4307 Regs.push_back(PhysReg.first); 4308 4309 // If this is an expanded reference, add the rest of the regs to Regs. 4310 if (NumRegs != 1) { 4311 TargetRegisterClass::iterator I = PhysReg.second->begin(); 4312 for (; *I != PhysReg.first; ++I) 4313 assert(I != PhysReg.second->end() && "Didn't find reg!"); 4314 4315 // Already added the first reg. 4316 --NumRegs; ++I; 4317 for (; NumRegs; --NumRegs, ++I) { 4318 assert(I != PhysReg.second->end() && "Ran out of registers to allocate!"); 4319 Regs.push_back(*I); 4320 } 4321 } 4322 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT); 4323 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo(); 4324 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI); 4325 return; 4326 } 4327 4328 // Otherwise, if this was a reference to an LLVM register class, create vregs 4329 // for this reference. 4330 std::vector<unsigned> RegClassRegs; 4331 const TargetRegisterClass *RC = PhysReg.second; 4332 if (RC) { 4333 // If this is an early clobber or tied register, our regalloc doesn't know 4334 // how to maintain the constraint. If it isn't, go ahead and create vreg 4335 // and let the regalloc do the right thing. 4336 if (!OpInfo.hasMatchingInput && !OpInfo.isEarlyClobber && 4337 // If there is some other early clobber and this is an input register, 4338 // then we are forced to pre-allocate the input reg so it doesn't 4339 // conflict with the earlyclobber. 4340 !(OpInfo.Type == InlineAsm::isInput && HasEarlyClobber)) { 4341 RegVT = *PhysReg.second->vt_begin(); 4342 4343 if (OpInfo.ConstraintVT == MVT::Other) 4344 ValueVT = RegVT; 4345 4346 // Create the appropriate number of virtual registers. 4347 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 4348 for (; NumRegs; --NumRegs) 4349 Regs.push_back(RegInfo.createVirtualRegister(PhysReg.second)); 4350 4351 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT); 4352 return; 4353 } 4354 4355 // Otherwise, we can't allocate it. 
Let the code below figure out how to 4356 // maintain these constraints. 4357 RegClassRegs.assign(PhysReg.second->begin(), PhysReg.second->end()); 4358 4359 } else { 4360 // This is a reference to a register class that doesn't directly correspond 4361 // to an LLVM register class. Allocate NumRegs consecutive, available, 4362 // registers from the class. 4363 RegClassRegs = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode, 4364 OpInfo.ConstraintVT); 4365 } 4366 4367 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo(); 4368 unsigned NumAllocated = 0; 4369 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) { 4370 unsigned Reg = RegClassRegs[i]; 4371 // See if this register is available. 4372 if ((isOutReg && OutputRegs.count(Reg)) || // Already used. 4373 (isInReg && InputRegs.count(Reg))) { // Already used. 4374 // Make sure we find consecutive registers. 4375 NumAllocated = 0; 4376 continue; 4377 } 4378 4379 // Check to see if this register is allocatable (i.e. don't give out the 4380 // stack pointer). 4381 if (RC == 0) { 4382 RC = isAllocatableRegister(Reg, MF, TLI, TRI); 4383 if (!RC) { // Couldn't allocate this register. 4384 // Reset NumAllocated to make sure we return consecutive registers. 4385 NumAllocated = 0; 4386 continue; 4387 } 4388 } 4389 4390 // Okay, this register is good, we can use it. 4391 ++NumAllocated; 4392 4393 // If we allocated enough consecutive registers, succeed. 4394 if (NumAllocated == NumRegs) { 4395 unsigned RegStart = (i-NumAllocated)+1; 4396 unsigned RegEnd = i+1; 4397 // Mark all of the allocated registers used. 4398 for (unsigned i = RegStart; i != RegEnd; ++i) 4399 Regs.push_back(RegClassRegs[i]); 4400 4401 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(), 4402 OpInfo.ConstraintVT); 4403 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI); 4404 return; 4405 } 4406 } 4407 4408 // Otherwise, we couldn't allocate enough registers for this. 
4409} 4410 4411 4412/// visitInlineAsm - Handle a call to an InlineAsm object. 4413/// 4414void SelectionDAGLowering::visitInlineAsm(CallSite CS) { 4415 InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue()); 4416 4417 /// ConstraintOperands - Information about all of the constraints. 4418 std::vector<SDISelAsmOperandInfo> ConstraintOperands; 4419 4420 SDValue Chain = getRoot(); 4421 SDValue Flag; 4422 4423 std::set<unsigned> OutputRegs, InputRegs; 4424 4425 // Do a prepass over the constraints, canonicalizing them, and building up the 4426 // ConstraintOperands list. 4427 std::vector<InlineAsm::ConstraintInfo> 4428 ConstraintInfos = IA->ParseConstraints(); 4429 4430 // SawEarlyClobber - Keep track of whether we saw an earlyclobber output 4431 // constraint. If so, we can't let the register allocator allocate any input 4432 // registers, because it will not know to avoid the earlyclobbered output reg. 4433 bool SawEarlyClobber = false; 4434 4435 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. 4436 unsigned ResNo = 0; // ResNo - The result number of the next output. 4437 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) { 4438 ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i])); 4439 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back(); 4440 4441 MVT OpVT = MVT::Other; 4442 4443 // Compute the value type for each operand. 4444 switch (OpInfo.Type) { 4445 case InlineAsm::isOutput: 4446 // Indirect outputs just consume an argument. 4447 if (OpInfo.isIndirect) { 4448 OpInfo.CallOperandVal = CS.getArgument(ArgNo++); 4449 break; 4450 } 4451 // The return value of the call is this value. As such, there is no 4452 // corresponding argument. 
      assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
      if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
        // Multiple outputs: each struct element is one result.
        OpVT = TLI.getValueType(STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpVT = TLI.getValueType(CS.getType());
      }
      ++ResNo;
      break;
    case InlineAsm::isInput:
      OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
      break;
    case InlineAsm::isClobber:
      // Nothing to do.
      break;
    }

    // If this is an input or an indirect output, process the call argument.
    // BasicBlocks are labels, currently appearing only in asm's.
    if (OpInfo.CallOperandVal) {
      if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal))
        OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
      else {
        OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
        const Type *OpTy = OpInfo.CallOperandVal->getType();
        // If this is an indirect operand, the operand is a pointer to the
        // accessed type.
        if (OpInfo.isIndirect)
          OpTy = cast<PointerType>(OpTy)->getElementType();

        // If OpTy is not a single value, it may be a struct/union that we
        // can tile with integers.
        if (!OpTy->isSingleValueType() && OpTy->isSized()) {
          unsigned BitSize = TD->getTypeSizeInBits(OpTy);
          switch (BitSize) {
          default: break;
          case 1:
          case 8:
          case 16:
          case 32:
          case 64:
            // Treat the aggregate as a single integer of the same width.
            OpTy = IntegerType::get(BitSize);
            break;
          }
        }

        OpVT = TLI.getValueType(OpTy, true);
      }
    }

    OpInfo.ConstraintVT = OpVT;

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);

    // Keep track of whether we see an earlyclobber.
    SawEarlyClobber |= OpInfo.isEarlyClobber;

    // If we see a clobber of a register, it is an early clobber.
    if (!SawEarlyClobber &&
        OpInfo.Type == InlineAsm::isClobber &&
        OpInfo.ConstraintType == TargetLowering::C_Register) {
      // Note that we want to ignore things that we don't track here, like
      // dirflag, fpsr, flags, etc.
      std::pair<unsigned, const TargetRegisterClass*> PhysReg =
        TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
                                         OpInfo.ConstraintVT);
      if (PhysReg.first || PhysReg.second) {
        // This is a register we know of.
        SawEarlyClobber = true;
      }
    }

    // If this is a memory input, and if the operand is not indirect, do what we
    // need to to provide an address for the memory input.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        !OpInfo.isIndirect) {
      assert(OpInfo.Type == InlineAsm::isInput &&
             "Can only indirectify direct input operands!");

      // Memory operands really want the address of the value.  If we don't have
      // an indirect input, put it in the constpool if we can, otherwise spill
      // it to a stack slot.

      // If the operand is a float, integer, or vector constant, spill to a
      // constant pool entry to get its address.
      Value *OpVal = OpInfo.CallOperandVal;
      if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
          isa<ConstantVector>(OpVal)) {
        OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
                                                 TLI.getPointerTy());
      } else {
        // Otherwise, create a stack slot and emit a store to it before the
        // asm.  The store is threaded onto Chain so it happens before the
        // INLINEASM node.
        const Type *Ty = OpVal->getType();
        uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
        unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(Ty);
        MachineFunction &MF = DAG.getMachineFunction();
        int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
        SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
        Chain = DAG.getStore(Chain, OpInfo.CallOperand, StackSlot, NULL, 0);
        OpInfo.CallOperand = StackSlot;
      }

      // There is no longer a Value* corresponding to this operand.
      OpInfo.CallOperandVal = 0;
      // It is now an indirect operand.
      OpInfo.isIndirect = true;
    }

    // If this constraint is for a specific register, allocate it before
    // anything else.
    if (OpInfo.ConstraintType == TargetLowering::C_Register)
      GetRegistersForValue(OpInfo, SawEarlyClobber, OutputRegs, InputRegs);
  }
  ConstraintInfos.clear();


  // Second pass - Loop over all of the operands, assigning virtual or physregs
  // to registerclass operands.
  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];

    // C_Register operands have already been allocated, Other/Memory don't need
    // to be.
    if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
      GetRegistersForValue(OpInfo, SawEarlyClobber, OutputRegs, InputRegs);
  }

  // AsmNodeOperands - The operands for the ISD::INLINEASM node.
  AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
  std::vector<SDValue> AsmNodeOperands;
  AsmNodeOperands.push_back(
          DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), MVT::Other));


  // Loop over all of the inputs, copying the operand values into the
  // appropriate registers and processing the output regs.
  RegsForValue RetValRegs;

  // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
  std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;

  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];

    switch (OpInfo.Type) {
    case InlineAsm::isOutput: {
      if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
          OpInfo.ConstraintType != TargetLowering::C_Register) {
        // Memory output, or 'other' output (e.g. 'X' constraint).
        assert(OpInfo.isIndirect && "Memory output must be indirect operand");

        // Add information to the INLINEASM node to know about this output.
        // Operand descriptor word: low 3 bits encode the operand kind
        // (4 = MEM), the remaining bits encode the number of following
        // operands (here 1).
        unsigned ResOpType = 4/*MEM*/ | (1 << 3);
        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                        TLI.getPointerTy()));
        AsmNodeOperands.push_back(OpInfo.CallOperand);
        break;
      }

      // Otherwise, this is a register or register class output.

      // Copy the output from the appropriate register.  Find a register that
      // we can use.
      if (OpInfo.AssignedRegs.Regs.empty()) {
        cerr << "Couldn't allocate output reg for constraint '"
             << OpInfo.ConstraintCode << "'!\n";
        exit(1);
      }

      // If this is an indirect operand, store through the pointer after the
      // asm.
      if (OpInfo.isIndirect) {
        IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
                                                      OpInfo.CallOperandVal));
      } else {
        // This is the result value of the call.
        assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
        // Concatenate this output onto the outputs list.
        RetValRegs.append(OpInfo.AssignedRegs);
      }

      // Add information to the INLINEASM node to know that this register is
      // set.
      OpInfo.AssignedRegs.AddInlineAsmOperands(2 /*REGDEF*/, DAG,
                                               AsmNodeOperands);
      break;
    }
    case InlineAsm::isInput: {
      SDValue InOperandVal = OpInfo.CallOperand;

      if (isdigit(OpInfo.ConstraintCode[0])) {    // Matching constraint?
        // If this is required to match an output register we have already set,
        // just use its register.
        unsigned OperandNo = atoi(OpInfo.ConstraintCode.c_str());

        // Scan until we find the definition we already emitted of this operand.
        // When we find it, create a RegsForValue operand.
        unsigned CurOp = 2;  // The first operand.
        for (; OperandNo; --OperandNo) {
          // Advance to the next operand: skip the descriptor word plus
          // (NumOps>>3) following operands.
          unsigned NumOps =
            cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
          assert(((NumOps & 7) == 2 /*REGDEF*/ ||
                  (NumOps & 7) == 4 /*MEM*/) &&
                 "Skipped past definitions?");
          CurOp += (NumOps>>3)+1;
        }

        unsigned NumOps =
          cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
        if ((NumOps & 7) == 2 /*REGDEF*/) {
          // Add NumOps>>3 registers to MatchedRegs.
          RegsForValue MatchedRegs;
          MatchedRegs.TLI = &TLI;
          MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
          MatchedRegs.RegVTs.push_back(AsmNodeOperands[CurOp+1].getValueType());
          for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
            unsigned Reg =
              cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
            MatchedRegs.Regs.push_back(Reg);
          }

          // Use the produced MatchedRegs object to
          MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, &Flag);
          MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
          break;
        } else {
          assert((NumOps & 7) == 4/*MEM*/ && "Unknown matching constraint!");
          assert((NumOps >> 3) == 1 && "Unexpected number of operands");
          // Add information to the INLINEASM node to know about this input.
          unsigned ResOpType = 4/*MEM*/ | (1 << 3);
          AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                          TLI.getPointerTy()));
          AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
          break;
        }
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Other) {
        assert(!OpInfo.isIndirect &&
               "Don't know how to handle indirect other inputs yet!");

        std::vector<SDValue> Ops;
        TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
                                         Ops, DAG);
        if (Ops.empty()) {
          cerr << "Invalid operand for inline asm constraint '"
               << OpInfo.ConstraintCode << "'!\n";
          exit(1);
        }

        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                        TLI.getPointerTy()));
        AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
        break;
      } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
        assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
               "Memory operands expect pointer values");

        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType = 4/*MEM*/ | (1 << 3);
        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                        TLI.getPointerTy()));
        AsmNodeOperands.push_back(InOperandVal);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");
      assert(!OpInfo.isIndirect &&
             "Don't know how to handle indirect register inputs yet!");

      // Copy the input into the appropriate registers.
      assert(!OpInfo.AssignedRegs.Regs.empty() &&
             "Couldn't allocate input reg!");

      OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, Chain, &Flag);

      OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, DAG,
                                               AsmNodeOperands);
      break;
    }
    case InlineAsm::isClobber: {
      // Add the clobbered value to the operand list, so that the register
      // allocator is aware that the physreg got clobbered.
      if (!OpInfo.AssignedRegs.Regs.empty())
        OpInfo.AssignedRegs.AddInlineAsmOperands(2/*REGDEF*/, DAG,
                                                 AsmNodeOperands);
      break;
    }
    }
  }

  // Finish up input operands.  The chain slot reserved at the front is
  // filled in now that all stores/copies have been threaded onto Chain.
  AsmNodeOperands[0] = Chain;
  if (Flag.getNode()) AsmNodeOperands.push_back(Flag);

  Chain = DAG.getNode(ISD::INLINEASM,
                      DAG.getNodeValueTypes(MVT::Other, MVT::Flag), 2,
                      &AsmNodeOperands[0], AsmNodeOperands.size());
  Flag = Chain.getValue(1);

  // If this asm returns a register value, copy the result from that register
  // and set it as the value of the call.
  if (!RetValRegs.Regs.empty()) {
    SDValue Val = RetValRegs.getCopyFromRegs(DAG, Chain, &Flag);

    // If any of the results of the inline asm is a vector, it may have the
    // wrong width/num elts.  This can happen for register classes that can
    // contain multiple different value types.  The preg or vreg allocated may
    // not have the same VT as was expected.  Convert it to the right type with
    // bit_convert.
    if (const StructType *ResSTy = dyn_cast<StructType>(CS.getType())) {
      for (unsigned i = 0, e = ResSTy->getNumElements(); i != e; ++i) {
        if (Val.getNode()->getValueType(i).isVector())
          Val = DAG.getNode(ISD::BIT_CONVERT,
                            TLI.getValueType(ResSTy->getElementType(i)), Val);
      }
    } else {
      if (Val.getValueType().isVector())
        Val = DAG.getNode(ISD::BIT_CONVERT, TLI.getValueType(CS.getType()),
                          Val);
    }

    setValue(CS.getInstruction(), Val);
  }

  std::vector<std::pair<SDValue, Value*> > StoresToEmit;

  // Process indirect outputs, first output all of the flagged copies out of
  // physregs.
  for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
    RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
    Value *Ptr = IndirectStoresToEmit[i].second;
    SDValue OutVal = OutRegs.getCopyFromRegs(DAG, Chain, &Flag);
    StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
  }

  // Emit the non-flagged stores from the physregs.
  SmallVector<SDValue, 8> OutChains;
  for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
    OutChains.push_back(DAG.getStore(Chain, StoresToEmit[i].first,
                                    getValue(StoresToEmit[i].second),
                                    StoresToEmit[i].second, 0));
  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &OutChains[0], OutChains.size());
  DAG.setRoot(Chain);
}


void SelectionDAGLowering::visitMalloc(MallocInst &I) {
  SDValue Src = getValue(I.getOperand(0));

  MVT IntPtr = TLI.getPointerTy();

  // Normalize the element count to pointer width before scaling it.
  if (IntPtr.bitsLT(Src.getValueType()))
    Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
  else if (IntPtr.bitsGT(Src.getValueType()))
    Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);

  // Scale the source by the type size.
  uint64_t ElementSize = TD->getABITypeSize(I.getType()->getElementType());
  Src = DAG.getNode(ISD::MUL, Src.getValueType(),
                    Src, DAG.getIntPtrConstant(ElementSize));

  // Lower to a call to the C runtime "malloc" with a single intptr-sized
  // byte-count argument.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Src;
  Entry.Ty = TLI.getTargetData()->getIntPtrType();
  Args.push_back(Entry);

  std::pair<SDValue,SDValue> Result =
    TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, CallingConv::C,
                    true, DAG.getExternalSymbol("malloc", IntPtr), Args, DAG);
  setValue(&I, Result.first);  // Pointers always fit in registers
  DAG.setRoot(Result.second);
}

/// visitFree - Lower a 'free' instruction into a call to the C runtime "free"
/// with the pointer operand as the single argument.
void SelectionDAGLowering::visitFree(FreeInst &I) {
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = getValue(I.getOperand(0));
  Entry.Ty = TLI.getTargetData()->getIntPtrType();
  Args.push_back(Entry);
  MVT IntPtr = TLI.getPointerTy();
  std::pair<SDValue,SDValue> Result =
    TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, false,
                    CallingConv::C, true,
                    DAG.getExternalSymbol("free", IntPtr), Args, DAG);
  DAG.setRoot(Result.second);
}

// EmitInstrWithCustomInserter - This method should be implemented by targets
// that mark instructions with the 'usesCustomDAGSchedInserter' flag.  These
// instructions are special in various ways, which require special support to
// insert.  The specified MachineInstr is created but not inserted into any
// basic blocks, and the scheduler passes ownership of it to this method.
// This default implementation is an abort: reaching it means the target set
// the flag without overriding the hook.
MachineBasicBlock *TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                       MachineBasicBlock *MBB) {
  cerr << "If a target marks an instruction with "
       << "'usesCustomDAGSchedInserter', it must implement "
       << "TargetLowering::EmitInstrWithCustomInserter!\n";
  abort();
  return 0;
}

/// visitVAStart - Lower llvm.va_start into an ISD::VASTART node chained onto
/// the current root.
void SelectionDAGLowering::visitVAStart(CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(),
                          getValue(I.getOperand(1)),
                          DAG.getSrcValue(I.getOperand(1))));
}

/// visitVAArg - Lower a va_arg instruction; result 0 is the loaded value,
/// result 1 is the updated chain.
void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
  SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
                             getValue(I.getOperand(0)),
                             DAG.getSrcValue(I.getOperand(0)));
  setValue(&I, V);
  DAG.setRoot(V.getValue(1));
}

/// visitVAEnd - Lower llvm.va_end into an ISD::VAEND node.
void SelectionDAGLowering::visitVAEnd(CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(),
                          getValue(I.getOperand(1)),
                          DAG.getSrcValue(I.getOperand(1))));
}

/// visitVACopy - Lower llvm.va_copy into an ISD::VACOPY node (dest is
/// operand 1, src is operand 2).
void SelectionDAGLowering::visitVACopy(CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(),
                          getValue(I.getOperand(1)),
                          getValue(I.getOperand(2)),
                          DAG.getSrcValue(I.getOperand(1)),
                          DAG.getSrcValue(I.getOperand(2))));
}

/// TargetLowering::LowerArguments - This is the default LowerArguments
/// implementation, which just inserts a FORMAL_ARGUMENTS node.  FIXME: When all
/// targets are migrated to using FORMAL_ARGUMENTS, this hook should be
/// integrated into SDISel.
void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &ArgValues) {
  // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
  SmallVector<SDValue, 3+16> Ops;
  Ops.push_back(DAG.getRoot());
  Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
  Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));

  // Add one result value for each formal argument.
  SmallVector<MVT, 16> RetVals;
  unsigned j = 1;   // j is the 1-based parameter-attribute index.
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
       I != E; ++I, ++j) {
    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(*this, I->getType(), ValueVTs);
    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      MVT VT = ValueVTs[Value];
      const Type *ArgTy = VT.getTypeForMVT();
      ISD::ArgFlagsTy Flags;
      unsigned OriginalAlignment =
        getTargetData()->getABITypeAlignment(ArgTy);

      // Translate parameter attributes into ISD argument flags.
      if (F.paramHasAttr(j, ParamAttr::ZExt))
        Flags.setZExt();
      if (F.paramHasAttr(j, ParamAttr::SExt))
        Flags.setSExt();
      if (F.paramHasAttr(j, ParamAttr::InReg))
        Flags.setInReg();
      if (F.paramHasAttr(j, ParamAttr::StructRet))
        Flags.setSRet();
      if (F.paramHasAttr(j, ParamAttr::ByVal)) {
        Flags.setByVal();
        const PointerType *Ty = cast<PointerType>(I->getType());
        const Type *ElementTy = Ty->getElementType();
        unsigned FrameAlign = getByValTypeAlignment(ElementTy);
        unsigned FrameSize  = getTargetData()->getABITypeSize(ElementTy);
        // For ByVal, alignment should be passed from FE.  BE will guess if
        // this info is not there but there are cases it cannot get right.
        if (F.getParamAlignment(j))
          FrameAlign = F.getParamAlignment(j);
        Flags.setByValAlign(FrameAlign);
        Flags.setByValSize(FrameSize);
      }
      if (F.paramHasAttr(j, ParamAttr::Nest))
        Flags.setNest();
      Flags.setOrigAlign(OriginalAlignment);

      // A value that needs multiple registers contributes one result (and
      // one flags operand) per register.
      MVT RegisterVT = getRegisterType(VT);
      unsigned NumRegs = getNumRegisters(VT);
      for (unsigned i = 0; i != NumRegs; ++i) {
        RetVals.push_back(RegisterVT);
        ISD::ArgFlagsTy MyFlags = Flags;
        if (NumRegs > 1 && i == 0)
          MyFlags.setSplit();
        // if it isn't first piece, alignment must be 1
        else if (i > 0)
          MyFlags.setOrigAlign(1);
        Ops.push_back(DAG.getArgFlags(MyFlags));
      }
    }
  }

  RetVals.push_back(MVT::Other);

  // Create the node.
  SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS,
                               DAG.getVTList(&RetVals[0], RetVals.size()),
                               &Ops[0], Ops.size()).getNode();

  // Prelower FORMAL_ARGUMENTS.  This isn't required for functionality, but
  // allows exposing the loads that may be part of the argument access to the
  // first DAGCombiner pass.
  SDValue TmpRes = LowerOperation(SDValue(Result, 0), DAG);

  // The number of results should match up, except that the lowered one may have
  // an extra flag result.
  assert((Result->getNumValues() == TmpRes.getNode()->getNumValues() ||
          (Result->getNumValues()+1 == TmpRes.getNode()->getNumValues() &&
           TmpRes.getValue(Result->getNumValues()).getValueType() == MVT::Flag))
         && "Lowering produced unexpected number of results!");

  // The FORMAL_ARGUMENTS node itself is likely no longer needed.
  if (Result != TmpRes.getNode() && Result->use_empty()) {
    HandleSDNode Dummy(DAG.getRoot());
    DAG.RemoveDeadNode(Result);
  }

  Result = TmpRes.getNode();

  // The last result of FORMAL_ARGUMENTS is the output chain.
  unsigned NumArgRegs = Result->getNumValues() - 1;
  DAG.setRoot(SDValue(Result, NumArgRegs));

  // Set up the return result vector.
  unsigned i = 0;     // Running index into the node's results.
  unsigned Idx = 1;   // 1-based parameter-attribute index (parallels j above).
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
      ++I, ++Idx) {
    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(*this, I->getType(), ValueVTs);
    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      MVT VT = ValueVTs[Value];
      MVT PartVT = getRegisterType(VT);

      unsigned NumParts = getNumRegisters(VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      for (unsigned j = 0; j != NumParts; ++j)
        Parts[j] = SDValue(Result, i++);

      ISD::NodeType AssertOp = ISD::DELETED_NODE;
      if (F.paramHasAttr(Idx, ParamAttr::SExt))
        AssertOp = ISD::AssertSext;
      else if (F.paramHasAttr(Idx, ParamAttr::ZExt))
        AssertOp = ISD::AssertZext;

      // Reassemble the (possibly split) parts into one value per argument.
      ArgValues.push_back(getCopyFromParts(DAG, &Parts[0], NumParts, PartVT, VT,
                                           AssertOp));
    }
  }
  assert(i == NumArgRegs && "Argument register count mismatch!");
}


/// TargetLowering::LowerCallTo - This is the default LowerCallTo
/// implementation, which just inserts an ISD::CALL node, which is later custom
/// lowered by the target to something concrete.  FIXME: When all targets are
/// migrated to using ISD::CALL, this hook should be integrated into SDISel.
std::pair<SDValue, SDValue>
TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
                            bool RetSExt, bool RetZExt, bool isVarArg,
                            unsigned CallingConv, bool isTailCall,
                            SDValue Callee,
                            ArgListTy &Args, SelectionDAG &DAG) {
  SmallVector<SDValue, 32> Ops;
  Ops.push_back(Chain);   // Op#0 - Chain
  Ops.push_back(DAG.getConstant(CallingConv, getPointerTy())); // Op#1 - CC
  Ops.push_back(DAG.getConstant(isVarArg, getPointerTy()));    // Op#2 - VarArg
  Ops.push_back(DAG.getConstant(isTailCall, getPointerTy()));  // Op#3 - Tail
  Ops.push_back(Callee);

  // Handle all of the outgoing arguments.
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      MVT VT = ValueVTs[Value];
      const Type *ArgTy = VT.getTypeForMVT();
      SDValue Op = SDValue(Args[i].Node.getNode(), Args[i].Node.getResNo() + Value);
      ISD::ArgFlagsTy Flags;
      unsigned OriginalAlignment =
        getTargetData()->getABITypeAlignment(ArgTy);

      // Translate the ArgListEntry attribute bits into ISD argument flags.
      if (Args[i].isZExt)
        Flags.setZExt();
      if (Args[i].isSExt)
        Flags.setSExt();
      if (Args[i].isInReg)
        Flags.setInReg();
      if (Args[i].isSRet)
        Flags.setSRet();
      if (Args[i].isByVal) {
        Flags.setByVal();
        const PointerType *Ty = cast<PointerType>(Args[i].Ty);
        const Type *ElementTy = Ty->getElementType();
        unsigned FrameAlign = getByValTypeAlignment(ElementTy);
        unsigned FrameSize  = getTargetData()->getABITypeSize(ElementTy);
        // For ByVal, alignment should come from FE.  BE will guess if this
        // info is not there but there are cases it cannot get right.
        if (Args[i].Alignment)
          FrameAlign = Args[i].Alignment;
        Flags.setByValAlign(FrameAlign);
        Flags.setByValSize(FrameSize);
      }
      if (Args[i].isNest)
        Flags.setNest();
      Flags.setOrigAlign(OriginalAlignment);

      MVT PartVT = getRegisterType(VT);
      unsigned NumParts = getNumRegisters(VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

      if (Args[i].isSExt)
        ExtendKind = ISD::SIGN_EXTEND;
      else if (Args[i].isZExt)
        ExtendKind = ISD::ZERO_EXTEND;

      // Split the value into legal register-sized parts.
      getCopyToParts(DAG, Op, &Parts[0], NumParts, PartVT, ExtendKind);

      for (unsigned i = 0; i != NumParts; ++i) {
        // if it isn't first piece, alignment must be 1
        ISD::ArgFlagsTy MyFlags = Flags;
        if (NumParts > 1 && i == 0)
          MyFlags.setSplit();
        else if (i != 0)
          MyFlags.setOrigAlign(1);

        Ops.push_back(Parts[i]);
        Ops.push_back(DAG.getArgFlags(MyFlags));
      }
    }
  }

  // Figure out the result value types. We start by making a list of
  // the potentially illegal return value types.
  SmallVector<MVT, 4> LoweredRetTys;
  SmallVector<MVT, 4> RetTys;
  ComputeValueVTs(*this, RetTy, RetTys);

  // Then we translate that to a list of legal types.
  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    MVT VT = RetTys[I];
    MVT RegisterVT = getRegisterType(VT);
    unsigned NumRegs = getNumRegisters(VT);
    for (unsigned i = 0; i != NumRegs; ++i)
      LoweredRetTys.push_back(RegisterVT);
  }

  LoweredRetTys.push_back(MVT::Other);  // Always has a chain.

  // Create the CALL node.
  SDValue Res = DAG.getNode(ISD::CALL,
                            DAG.getVTList(&LoweredRetTys[0],
                                          LoweredRetTys.size()),
                            &Ops[0], Ops.size());
  // The chain is always the last result of the CALL node.
  Chain = Res.getValue(LoweredRetTys.size() - 1);

  // Gather up the call result into a single value.
  if (RetTy != Type::VoidTy) {
    ISD::NodeType AssertOp = ISD::DELETED_NODE;

    if (RetSExt)
      AssertOp = ISD::AssertSext;
    else if (RetZExt)
      AssertOp = ISD::AssertZext;

    SmallVector<SDValue, 4> ReturnValues;
    unsigned RegNo = 0;
    for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
      MVT VT = RetTys[I];
      MVT RegisterVT = getRegisterType(VT);
      unsigned NumRegs = getNumRegisters(VT);
      unsigned RegNoEnd = NumRegs + RegNo;
      SmallVector<SDValue, 4> Results;
      for (; RegNo != RegNoEnd; ++RegNo)
        Results.push_back(Res.getValue(RegNo));
      // Recombine the per-register parts into one value of the original type.
      SDValue ReturnValue =
        getCopyFromParts(DAG, &Results[0], NumRegs, RegisterVT, VT,
                         AssertOp);
      ReturnValues.push_back(ReturnValue);
    }
    Res = DAG.getMergeValues(DAG.getVTList(&RetTys[0], RetTys.size()),
                             &ReturnValues[0], ReturnValues.size());
  }

  return std::make_pair(Res, Chain);
}

// Default LowerOperation: targets that request custom lowering must override.
SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
  assert(0 && "LowerOperation not implemented for this target!");
  abort();
  return SDValue();
}


//===----------------------------------------------------------------------===//
// SelectionDAGISel code
//===----------------------------------------------------------------------===//

SelectionDAGISel::SelectionDAGISel(TargetLowering &tli, bool fast) :
  FunctionPass((intptr_t)&ID), TLI(tli),
  FuncInfo(new FunctionLoweringInfo(TLI)),
  CurDAG(new SelectionDAG(TLI, *FuncInfo)),
  SDL(new SelectionDAGLowering(*CurDAG, TLI, *FuncInfo)),
  GFI(),
  Fast(fast),
  DAGSize(0)
{}

// Destroy in reverse order of construction: SDL refers to CurDAG, which
// refers to FuncInfo.
SelectionDAGISel::~SelectionDAGISel() {
  delete SDL;
  delete CurDAG;
  delete FuncInfo;
}

/// MakeReg - Allocate a fresh virtual register of the register class that
/// corresponds to the given value type.
unsigned SelectionDAGISel::MakeReg(MVT VT) {
  return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
}

void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>(); 5191 AU.addRequired<GCModuleInfo>(); 5192 AU.setPreservesAll(); 5193} 5194 5195bool SelectionDAGISel::runOnFunction(Function &Fn) { 5196 // Get alias analysis for load/store combining. 5197 AA = &getAnalysis<AliasAnalysis>(); 5198 5199 MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine()); 5200 if (MF.getFunction()->hasGC()) 5201 GFI = &getAnalysis<GCModuleInfo>().getFunctionInfo(*MF.getFunction()); 5202 else 5203 GFI = 0; 5204 RegInfo = &MF.getRegInfo(); 5205 DOUT << "\n\n\n=== " << Fn.getName() << "\n"; 5206 5207 FuncInfo->set(Fn, MF); 5208 CurDAG->init(MF, getAnalysisToUpdate<MachineModuleInfo>()); 5209 SDL->init(GFI, *AA); 5210 5211 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) 5212 if (InvokeInst *Invoke = dyn_cast<InvokeInst>(I->getTerminator())) 5213 // Mark landing pad. 5214 FuncInfo->MBBMap[Invoke->getSuccessor(1)]->setIsLandingPad(); 5215 5216 SelectAllBasicBlocks(Fn, MF); 5217 5218 // Add function live-ins to entry block live-in set. 
5219 BasicBlock *EntryBB = &Fn.getEntryBlock(); 5220 BB = FuncInfo->MBBMap[EntryBB]; 5221 if (!RegInfo->livein_empty()) 5222 for (MachineRegisterInfo::livein_iterator I = RegInfo->livein_begin(), 5223 E = RegInfo->livein_end(); I != E; ++I) 5224 BB->addLiveIn(I->first); 5225 5226#ifndef NDEBUG 5227 assert(FuncInfo->CatchInfoFound.size() == FuncInfo->CatchInfoLost.size() && 5228 "Not all catch info was assigned to a landing pad!"); 5229#endif 5230 5231 FuncInfo->clear(); 5232 5233 return true; 5234} 5235 5236void SelectionDAGLowering::CopyValueToVirtualRegister(Value *V, unsigned Reg) { 5237 SDValue Op = getValue(V); 5238 assert((Op.getOpcode() != ISD::CopyFromReg || 5239 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) && 5240 "Copy from a reg to the same reg!"); 5241 assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg"); 5242 5243 RegsForValue RFV(TLI, Reg, V->getType()); 5244 SDValue Chain = DAG.getEntryNode(); 5245 RFV.getCopyToRegs(Op, DAG, Chain, 0); 5246 PendingExports.push_back(Chain); 5247} 5248 5249void SelectionDAGISel:: 5250LowerArguments(BasicBlock *LLVMBB) { 5251 // If this is the entry block, emit arguments. 5252 Function &F = *LLVMBB->getParent(); 5253 SDValue OldRoot = SDL->DAG.getRoot(); 5254 SmallVector<SDValue, 16> Args; 5255 TLI.LowerArguments(F, SDL->DAG, Args); 5256 5257 unsigned a = 0; 5258 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end(); 5259 AI != E; ++AI) { 5260 SmallVector<MVT, 4> ValueVTs; 5261 ComputeValueVTs(TLI, AI->getType(), ValueVTs); 5262 unsigned NumValues = ValueVTs.size(); 5263 if (!AI->use_empty()) { 5264 SDL->setValue(AI, SDL->DAG.getMergeValues(&Args[a], NumValues)); 5265 // If this argument is live outside of the entry block, insert a copy from 5266 // whereever we got it to the vreg that other BB's will reference it as. 
5267 DenseMap<const Value*, unsigned>::iterator VMI=FuncInfo->ValueMap.find(AI); 5268 if (VMI != FuncInfo->ValueMap.end()) { 5269 SDL->CopyValueToVirtualRegister(AI, VMI->second); 5270 } 5271 } 5272 a += NumValues; 5273 } 5274 5275 // Finally, if the target has anything special to do, allow it to do so. 5276 // FIXME: this should insert code into the DAG! 5277 EmitFunctionEntryCode(F, SDL->DAG.getMachineFunction()); 5278} 5279 5280static void copyCatchInfo(BasicBlock *SrcBB, BasicBlock *DestBB, 5281 MachineModuleInfo *MMI, FunctionLoweringInfo &FLI) { 5282 for (BasicBlock::iterator I = SrcBB->begin(), E = --SrcBB->end(); I != E; ++I) 5283 if (isSelector(I)) { 5284 // Apply the catch info to DestBB. 5285 addCatchInfo(cast<CallInst>(*I), MMI, FLI.MBBMap[DestBB]); 5286#ifndef NDEBUG 5287 if (!FLI.MBBMap[SrcBB]->isLandingPad()) 5288 FLI.CatchInfoFound.insert(I); 5289#endif 5290 } 5291} 5292 5293/// IsFixedFrameObjectWithPosOffset - Check if object is a fixed frame object and 5294/// whether object offset >= 0. 5295static bool 5296IsFixedFrameObjectWithPosOffset(MachineFrameInfo * MFI, SDValue Op) { 5297 if (!isa<FrameIndexSDNode>(Op)) return false; 5298 5299 FrameIndexSDNode * FrameIdxNode = dyn_cast<FrameIndexSDNode>(Op); 5300 int FrameIdx = FrameIdxNode->getIndex(); 5301 return MFI->isFixedObjectIndex(FrameIdx) && 5302 MFI->getObjectOffset(FrameIdx) >= 0; 5303} 5304 5305/// IsPossiblyOverwrittenArgumentOfTailCall - Check if the operand could 5306/// possibly be overwritten when lowering the outgoing arguments in a tail 5307/// call. Currently the implementation of this call is very conservative and 5308/// assumes all arguments sourcing from FORMAL_ARGUMENTS or a CopyFromReg with 5309/// virtual registers would be overwritten by direct lowering. 
static bool IsPossiblyOverwrittenArgumentOfTailCall(SDValue Op,
                                                    MachineFrameInfo * MFI) {
  RegisterSDNode * OpReg = NULL;
  // NOTE: the dyn_cast assignment to OpReg inside the condition is
  // deliberate - the register test on the next line depends on it.
  if (Op.getOpcode() == ISD::FORMAL_ARGUMENTS ||
      (Op.getOpcode()== ISD::CopyFromReg &&
       (OpReg = dyn_cast<RegisterSDNode>(Op.getOperand(1))) &&
       (OpReg->getReg() >= TargetRegisterInfo::FirstVirtualRegister)) ||
      (Op.getOpcode() == ISD::LOAD &&
       IsFixedFrameObjectWithPosOffset(MFI, Op.getOperand(1))) ||
      (Op.getOpcode() == ISD::MERGE_VALUES &&
       Op.getOperand(Op.getResNo()).getOpcode() == ISD::LOAD &&
       IsFixedFrameObjectWithPosOffset(MFI, Op.getOperand(Op.getResNo()).
                                       getOperand(1))))
    return true;
  return false;
}

/// CheckDAGForTailCallsAndFixThem - This Function looks for CALL nodes in the
/// DAG and fixes their tailcall attribute operand.
static void CheckDAGForTailCallsAndFixThem(SelectionDAG &DAG,
                                           TargetLowering& TLI) {
  SDNode * Ret = NULL;
  SDValue Terminator = DAG.getRoot();

  // Find RET node.
  if (Terminator.getOpcode() == ISD::RET) {
    Ret = Terminator.getNode();
  }

  // Fix tail call attribute of CALL nodes. Iterate the node list backwards
  // (allnodes_end toward allnodes_begin).
  for (SelectionDAG::allnodes_iterator BE = DAG.allnodes_begin(),
         BI = DAG.allnodes_end(); BI != BE; ) {
    --BI;
    if (BI->getOpcode() == ISD::CALL) {
      SDValue OpRet(Ret, 0);
      SDValue OpCall(BI, 0);
      // Operand 3 of a CALL node is the "is tail call" flag.
      bool isMarkedTailCall =
        cast<ConstantSDNode>(OpCall.getOperand(3))->getValue() != 0;
      // If CALL node has tail call attribute set to true and the call is not
      // eligible (no RET or the target rejects) the attribute is fixed to
      // false. The TargetLowering::IsEligibleForTailCallOptimization function
      // must correctly identify tail call optimizable calls.
      if (!isMarkedTailCall) continue;
      if (Ret==NULL ||
          !TLI.IsEligibleForTailCallOptimization(OpCall, OpRet, DAG)) {
        // Not eligible. Mark CALL node as non tail call. Rebuild the operand
        // list with operand 3 replaced by a 'false' constant.
        SmallVector<SDValue, 32> Ops;
        unsigned idx=0;
        for(SDNode::op_iterator I =OpCall.getNode()->op_begin(),
              E = OpCall.getNode()->op_end(); I != E; I++, idx++) {
          if (idx!=3)
            Ops.push_back(*I);
          else
            Ops.push_back(DAG.getConstant(false, TLI.getPointerTy()));
        }
        DAG.UpdateNodeOperands(OpCall, Ops.begin(), Ops.size());
      } else {
        // Look for tail call clobbered arguments. Emit a series of
        // copyto/copyfrom virtual register nodes to protect them.
        SmallVector<SDValue, 32> Ops;
        SDValue Chain = OpCall.getOperand(0), InFlag;
        unsigned idx=0;
        for(SDNode::op_iterator I = OpCall.getNode()->op_begin(),
              E = OpCall.getNode()->op_end(); I != E; I++, idx++) {
          SDValue Arg = *I;
          // Argument values start at operand 5 and occupy the odd slots;
          // each is followed by its ARG_FLAGS operand at idx+1.
          if (idx > 4 && (idx % 2)) {
            bool isByVal = cast<ARG_FLAGSSDNode>(OpCall.getOperand(idx+1))->
              getArgFlags().isByVal();
            MachineFunction &MF = DAG.getMachineFunction();
            MachineFrameInfo *MFI = MF.getFrameInfo();
            if (!isByVal &&
                IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI)) {
              // Copy the argument through a fresh virtual register so the
              // original storage may be clobbered safely.
              MVT VT = Arg.getValueType();
              unsigned VReg = MF.getRegInfo().
                createVirtualRegister(TLI.getRegClassFor(VT));
              Chain = DAG.getCopyToReg(Chain, VReg, Arg, InFlag);
              InFlag = Chain.getValue(1);
              Arg = DAG.getCopyFromReg(Chain, VReg, VT, InFlag);
              Chain = Arg.getValue(1);
              InFlag = Arg.getValue(2);
            }
          }
          Ops.push_back(Arg);
        }
        // Link in chain of CopyTo/CopyFromReg.
        Ops[0] = Chain;
        DAG.UpdateNodeOperands(OpCall, Ops.begin(), Ops.size());
      }
    }
  }
}

/// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
/// ensure constants are generated when needed. Remember the virtual registers
/// that need to be added to the Machine PHI nodes as input. We cannot just
/// directly add them, because expansion might result in multiple MBB's for one
/// BB.
As such, the start of the BB might correspond to a different MBB than 5407/// the end. 5408/// 5409void 5410SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) { 5411 TerminatorInst *TI = LLVMBB->getTerminator(); 5412 5413 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled; 5414 5415 // Check successor nodes' PHI nodes that expect a constant to be available 5416 // from this block. 5417 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) { 5418 BasicBlock *SuccBB = TI->getSuccessor(succ); 5419 if (!isa<PHINode>(SuccBB->begin())) continue; 5420 MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB]; 5421 5422 // If this terminator has multiple identical successors (common for 5423 // switches), only handle each succ once. 5424 if (!SuccsHandled.insert(SuccMBB)) continue; 5425 5426 MachineBasicBlock::iterator MBBI = SuccMBB->begin(); 5427 PHINode *PN; 5428 5429 // At this point we know that there is a 1-1 correspondence between LLVM PHI 5430 // nodes and Machine PHI nodes, but the incoming operands have not been 5431 // emitted yet. 5432 for (BasicBlock::iterator I = SuccBB->begin(); 5433 (PN = dyn_cast<PHINode>(I)); ++I) { 5434 // Ignore dead phi's. 
5435 if (PN->use_empty()) continue; 5436 5437 unsigned Reg; 5438 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB); 5439 5440 if (Constant *C = dyn_cast<Constant>(PHIOp)) { 5441 unsigned &RegOut = SDL->ConstantsOut[C]; 5442 if (RegOut == 0) { 5443 RegOut = FuncInfo->CreateRegForValue(C); 5444 SDL->CopyValueToVirtualRegister(C, RegOut); 5445 } 5446 Reg = RegOut; 5447 } else { 5448 Reg = FuncInfo->ValueMap[PHIOp]; 5449 if (Reg == 0) { 5450 assert(isa<AllocaInst>(PHIOp) && 5451 FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) && 5452 "Didn't codegen value into a register!??"); 5453 Reg = FuncInfo->CreateRegForValue(PHIOp); 5454 SDL->CopyValueToVirtualRegister(PHIOp, Reg); 5455 } 5456 } 5457 5458 // Remember that this register needs to added to the machine PHI node as 5459 // the input for this MBB. 5460 SmallVector<MVT, 4> ValueVTs; 5461 ComputeValueVTs(TLI, PN->getType(), ValueVTs); 5462 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) { 5463 MVT VT = ValueVTs[vti]; 5464 unsigned NumRegisters = TLI.getNumRegisters(VT); 5465 for (unsigned i = 0, e = NumRegisters; i != e; ++i) 5466 SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i)); 5467 Reg += NumRegisters; 5468 } 5469 } 5470 } 5471 SDL->ConstantsOut.clear(); 5472 5473 // Lower the terminator after the copies are emitted. 5474 SDL->visit(*LLVMBB->getTerminator()); 5475} 5476 5477void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, 5478 BasicBlock::iterator Begin, 5479 BasicBlock::iterator End) { 5480 SDL->setCurrentBasicBlock(BB); 5481 5482 MachineModuleInfo *MMI = CurDAG->getMachineModuleInfo(); 5483 5484 if (MMI && BB->isLandingPad()) { 5485 // Add a label to mark the beginning of the landing pad. Deletion of the 5486 // landing pad can thus be detected via the MachineModuleInfo. 5487 unsigned LabelID = MMI->addLandingPad(BB); 5488 CurDAG->setRoot(CurDAG->getLabel(ISD::EH_LABEL, 5489 CurDAG->getEntryNode(), LabelID)); 5490 5491 // Mark exception register as live in. 
5492 unsigned Reg = TLI.getExceptionAddressRegister(); 5493 if (Reg) BB->addLiveIn(Reg); 5494 5495 // Mark exception selector register as live in. 5496 Reg = TLI.getExceptionSelectorRegister(); 5497 if (Reg) BB->addLiveIn(Reg); 5498 5499 // FIXME: Hack around an exception handling flaw (PR1508): the personality 5500 // function and list of typeids logically belong to the invoke (or, if you 5501 // like, the basic block containing the invoke), and need to be associated 5502 // with it in the dwarf exception handling tables. Currently however the 5503 // information is provided by an intrinsic (eh.selector) that can be moved 5504 // to unexpected places by the optimizers: if the unwind edge is critical, 5505 // then breaking it can result in the intrinsics being in the successor of 5506 // the landing pad, not the landing pad itself. This results in exceptions 5507 // not being caught because no typeids are associated with the invoke. 5508 // This may not be the only way things can go wrong, but it is the only way 5509 // we try to work around for the moment. 5510 BranchInst *Br = dyn_cast<BranchInst>(LLVMBB->getTerminator()); 5511 5512 if (Br && Br->isUnconditional()) { // Critical edge? 5513 BasicBlock::iterator I, E; 5514 for (I = LLVMBB->begin(), E = --LLVMBB->end(); I != E; ++I) 5515 if (isSelector(I)) 5516 break; 5517 5518 if (I == E) 5519 // No catch info found - try to extract some from the successor. 5520 copyCatchInfo(Br->getSuccessor(0), LLVMBB, MMI, *FuncInfo); 5521 } 5522 } 5523 5524 // Lower all of the non-terminator instructions. 5525 for (BasicBlock::iterator I = Begin; I != End; ++I) 5526 if (!isa<TerminatorInst>(I)) 5527 SDL->visit(*I); 5528 5529 // Ensure that all instructions which are used outside of their defining 5530 // blocks are available as virtual registers. Invoke is handled elsewhere. 
5531 for (BasicBlock::iterator I = Begin; I != End; ++I) 5532 if (!I->use_empty() && !isa<PHINode>(I) && !isa<InvokeInst>(I)) { 5533 DenseMap<const Value*,unsigned>::iterator VMI =FuncInfo->ValueMap.find(I); 5534 if (VMI != FuncInfo->ValueMap.end()) 5535 SDL->CopyValueToVirtualRegister(I, VMI->second); 5536 } 5537 5538 // Handle PHI nodes in successor blocks. 5539 if (End == LLVMBB->end()) 5540 HandlePHINodesInSuccessorBlocks(LLVMBB); 5541 5542 // Make sure the root of the DAG is up-to-date. 5543 CurDAG->setRoot(SDL->getControlRoot()); 5544 5545 // Check whether calls in this block are real tail calls. Fix up CALL nodes 5546 // with correct tailcall attribute so that the target can rely on the tailcall 5547 // attribute indicating whether the call is really eligible for tail call 5548 // optimization. 5549 CheckDAGForTailCallsAndFixThem(*CurDAG, TLI); 5550 5551 // Final step, emit the lowered DAG as machine code. 5552 CodeGenAndEmitDAG(); 5553 SDL->clear(); 5554} 5555 5556void SelectionDAGISel::ComputeLiveOutVRegInfo() { 5557 SmallPtrSet<SDNode*, 128> VisitedNodes; 5558 SmallVector<SDNode*, 128> Worklist; 5559 5560 Worklist.push_back(CurDAG->getRoot().getNode()); 5561 5562 APInt Mask; 5563 APInt KnownZero; 5564 APInt KnownOne; 5565 5566 while (!Worklist.empty()) { 5567 SDNode *N = Worklist.back(); 5568 Worklist.pop_back(); 5569 5570 // If we've already seen this node, ignore it. 5571 if (!VisitedNodes.insert(N)) 5572 continue; 5573 5574 // Otherwise, add all chain operands to the worklist. 5575 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) 5576 if (N->getOperand(i).getValueType() == MVT::Other) 5577 Worklist.push_back(N->getOperand(i).getNode()); 5578 5579 // If this is a CopyToReg with a vreg dest, process it. 
5580 if (N->getOpcode() != ISD::CopyToReg) 5581 continue; 5582 5583 unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg(); 5584 if (!TargetRegisterInfo::isVirtualRegister(DestReg)) 5585 continue; 5586 5587 // Ignore non-scalar or non-integer values. 5588 SDValue Src = N->getOperand(2); 5589 MVT SrcVT = Src.getValueType(); 5590 if (!SrcVT.isInteger() || SrcVT.isVector()) 5591 continue; 5592 5593 unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src); 5594 Mask = APInt::getAllOnesValue(SrcVT.getSizeInBits()); 5595 CurDAG->ComputeMaskedBits(Src, Mask, KnownZero, KnownOne); 5596 5597 // Only install this information if it tells us something. 5598 if (NumSignBits != 1 || KnownZero != 0 || KnownOne != 0) { 5599 DestReg -= TargetRegisterInfo::FirstVirtualRegister; 5600 FunctionLoweringInfo &FLI = CurDAG->getFunctionLoweringInfo(); 5601 if (DestReg >= FLI.LiveOutRegInfo.size()) 5602 FLI.LiveOutRegInfo.resize(DestReg+1); 5603 FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[DestReg]; 5604 LOI.NumSignBits = NumSignBits; 5605 LOI.KnownOne = NumSignBits; 5606 LOI.KnownZero = NumSignBits; 5607 } 5608 } 5609} 5610 5611void SelectionDAGISel::CodeGenAndEmitDAG() { 5612 std::string GroupName; 5613 if (TimePassesIsEnabled) 5614 GroupName = "Instruction Selection and Scheduling"; 5615 std::string BlockName; 5616 if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewLegalizeDAGs || 5617 ViewDAGCombine2 || ViewISelDAGs || ViewSchedDAGs || ViewSUnitDAGs) 5618 BlockName = CurDAG->getMachineFunction().getFunction()->getName() + ':' + 5619 BB->getBasicBlock()->getName(); 5620 5621 DOUT << "Initial selection DAG:\n"; 5622 DEBUG(CurDAG->dump()); 5623 5624 if (ViewDAGCombine1) CurDAG->viewGraph("dag-combine1 input for " + BlockName); 5625 5626 // Run the DAG combiner in pre-legalize mode. 
5627 if (TimePassesIsEnabled) { 5628 NamedRegionTimer T("DAG Combining 1", GroupName); 5629 CurDAG->Combine(false, *AA, Fast); 5630 } else { 5631 CurDAG->Combine(false, *AA, Fast); 5632 } 5633 5634 DOUT << "Optimized lowered selection DAG:\n"; 5635 DEBUG(CurDAG->dump()); 5636 5637 // Second step, hack on the DAG until it only uses operations and types that 5638 // the target supports. 5639 if (EnableLegalizeTypes) {// Enable this some day. 5640 if (ViewLegalizeTypesDAGs) CurDAG->viewGraph("legalize-types input for " + 5641 BlockName); 5642 5643 if (TimePassesIsEnabled) { 5644 NamedRegionTimer T("Type Legalization", GroupName); 5645 CurDAG->LegalizeTypes(); 5646 } else { 5647 CurDAG->LegalizeTypes(); 5648 } 5649 5650 DOUT << "Type-legalized selection DAG:\n"; 5651 DEBUG(CurDAG->dump()); 5652 5653 // TODO: enable a dag combine pass here. 5654 } 5655 5656 if (ViewLegalizeDAGs) CurDAG->viewGraph("legalize input for " + BlockName); 5657 5658 if (TimePassesIsEnabled) { 5659 NamedRegionTimer T("DAG Legalization", GroupName); 5660 CurDAG->Legalize(); 5661 } else { 5662 CurDAG->Legalize(); 5663 } 5664 5665 DOUT << "Legalized selection DAG:\n"; 5666 DEBUG(CurDAG->dump()); 5667 5668 if (ViewDAGCombine2) CurDAG->viewGraph("dag-combine2 input for " + BlockName); 5669 5670 // Run the DAG combiner in post-legalize mode. 5671 if (TimePassesIsEnabled) { 5672 NamedRegionTimer T("DAG Combining 2", GroupName); 5673 CurDAG->Combine(true, *AA, Fast); 5674 } else { 5675 CurDAG->Combine(true, *AA, Fast); 5676 } 5677 5678 DOUT << "Optimized legalized selection DAG:\n"; 5679 DEBUG(CurDAG->dump()); 5680 5681 if (ViewISelDAGs) CurDAG->viewGraph("isel input for " + BlockName); 5682 5683 if (!Fast && EnableValueProp) 5684 ComputeLiveOutVRegInfo(); 5685 5686 // Third, instruction select all of the operations to machine code, adding the 5687 // code to the MachineBasicBlock. 
5688 if (TimePassesIsEnabled) { 5689 NamedRegionTimer T("Instruction Selection", GroupName); 5690 InstructionSelect(); 5691 } else { 5692 InstructionSelect(); 5693 } 5694 5695 DOUT << "Selected selection DAG:\n"; 5696 DEBUG(CurDAG->dump()); 5697 5698 if (ViewSchedDAGs) CurDAG->viewGraph("scheduler input for " + BlockName); 5699 5700 // Schedule machine code. 5701 ScheduleDAG *Scheduler; 5702 if (TimePassesIsEnabled) { 5703 NamedRegionTimer T("Instruction Scheduling", GroupName); 5704 Scheduler = Schedule(); 5705 } else { 5706 Scheduler = Schedule(); 5707 } 5708 5709 if (ViewSUnitDAGs) Scheduler->viewGraph(); 5710 5711 // Emit machine code to BB. This can change 'BB' to the last block being 5712 // inserted into. 5713 if (TimePassesIsEnabled) { 5714 NamedRegionTimer T("Instruction Creation", GroupName); 5715 BB = Scheduler->EmitSchedule(); 5716 } else { 5717 BB = Scheduler->EmitSchedule(); 5718 } 5719 5720 // Free the scheduler state. 5721 if (TimePassesIsEnabled) { 5722 NamedRegionTimer T("Instruction Scheduling Cleanup", GroupName); 5723 delete Scheduler; 5724 } else { 5725 delete Scheduler; 5726 } 5727 5728 DOUT << "Selected machine code:\n"; 5729 DEBUG(BB->dump()); 5730} 5731 5732void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn, MachineFunction &MF) { 5733 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) { 5734 BasicBlock *LLVMBB = &*I; 5735 BB = FuncInfo->MBBMap[LLVMBB]; 5736 5737 BasicBlock::iterator Begin = LLVMBB->begin(); 5738 BasicBlock::iterator End = LLVMBB->end(); 5739 5740 // Lower any arguments needed in this block if this is the entry block. 5741 if (LLVMBB == &Fn.getEntryBlock()) 5742 LowerArguments(LLVMBB); 5743 5744 // Before doing SelectionDAG ISel, see if FastISel has been requested. 5745 // FastISel doesn't support EH landing pads, which require special handling. 5746 if (EnableFastISel && !BB->isLandingPad()) { 5747 if (FastISel *F = TLI.createFastISel(*FuncInfo->MF)) { 5748 // Emit code for any incoming arguments. 
This must happen before 5749 // beginning FastISel on the entry block. 5750 if (LLVMBB == &Fn.getEntryBlock()) { 5751 CurDAG->setRoot(SDL->getControlRoot()); 5752 CodeGenAndEmitDAG(); 5753 SDL->clear(); 5754 } 5755 // Do FastISel on as many instructions as possible. 5756 while (Begin != End) { 5757 Begin = F->SelectInstructions(Begin, End, FuncInfo->ValueMap, 5758 FuncInfo->MBBMap, BB); 5759 5760 // If the "fast" selector selected the entire block, we're done. 5761 if (Begin == End) 5762 break; 5763 5764 // Next, try calling the target to attempt to handle the instruction. 5765 if (F->TargetSelectInstruction(Begin, FuncInfo->ValueMap, 5766 FuncInfo->MBBMap, BB)) 5767 continue; 5768 5769 // Handle certain instructions as single-LLVM-Instruction blocks. 5770 if (isa<CallInst>(Begin) || isa<LoadInst>(Begin) || 5771 isa<StoreInst>(Begin)) { 5772 if (Begin->getType() != Type::VoidTy) { 5773 unsigned &R = FuncInfo->ValueMap[Begin]; 5774 if (!R) 5775 R = FuncInfo->CreateRegForValue(Begin); 5776 } 5777 5778 SelectBasicBlock(LLVMBB, Begin, next(Begin)); 5779 ++Begin; 5780 continue; 5781 } 5782 5783 if (!DisableFastISelAbort && 5784 // For now, don't abort on non-conditional-branch terminators. 5785 (!isa<TerminatorInst>(Begin) || 5786 (isa<BranchInst>(Begin) && 5787 cast<BranchInst>(Begin)->isUnconditional()))) { 5788 // The "fast" selector couldn't handle something and bailed. 5789 // For the purpose of debugging, just abort. 5790#ifndef NDEBUG 5791 Begin->dump(); 5792#endif 5793 assert(0 && "FastISel didn't select the entire block"); 5794 } 5795 break; 5796 } 5797 delete F; 5798 } 5799 } 5800 5801 // Run SelectionDAG instruction selection on the remainder of the block 5802 // not handled by FastISel. If FastISel is not run, this is the entire 5803 // block. 
If FastISel is run and happens to handle all of the 5804 // LLVM Instructions in the block, [Begin,End) will be an empty range, 5805 // but we still need to run this so that 5806 // HandlePHINodesInSuccessorBlocks is called and any resulting code 5807 // is emitted. 5808 SelectBasicBlock(LLVMBB, Begin, End); 5809 5810 FinishBasicBlock(); 5811 } 5812} 5813 5814void 5815SelectionDAGISel::FinishBasicBlock() { 5816 5817 // Perform target specific isel post processing. 5818 InstructionSelectPostProcessing(); 5819 5820 DOUT << "Target-post-processed machine code:\n"; 5821 DEBUG(BB->dump()); 5822 5823 DOUT << "Total amount of phi nodes to update: " 5824 << SDL->PHINodesToUpdate.size() << "\n"; 5825 DEBUG(for (unsigned i = 0, e = SDL->PHINodesToUpdate.size(); i != e; ++i) 5826 DOUT << "Node " << i << " : (" << SDL->PHINodesToUpdate[i].first 5827 << ", " << SDL->PHINodesToUpdate[i].second << ")\n";); 5828 5829 // Next, now that we know what the last MBB the LLVM BB expanded is, update 5830 // PHI nodes in successors. 
5831 if (SDL->SwitchCases.empty() && 5832 SDL->JTCases.empty() && 5833 SDL->BitTestCases.empty()) { 5834 for (unsigned i = 0, e = SDL->PHINodesToUpdate.size(); i != e; ++i) { 5835 MachineInstr *PHI = SDL->PHINodesToUpdate[i].first; 5836 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 5837 "This is not a machine PHI node that we are updating!"); 5838 PHI->addOperand(MachineOperand::CreateReg(SDL->PHINodesToUpdate[i].second, 5839 false)); 5840 PHI->addOperand(MachineOperand::CreateMBB(BB)); 5841 } 5842 SDL->PHINodesToUpdate.clear(); 5843 return; 5844 } 5845 5846 for (unsigned i = 0, e = SDL->BitTestCases.size(); i != e; ++i) { 5847 // Lower header first, if it wasn't already lowered 5848 if (!SDL->BitTestCases[i].Emitted) { 5849 // Set the current basic block to the mbb we wish to insert the code into 5850 BB = SDL->BitTestCases[i].Parent; 5851 SDL->setCurrentBasicBlock(BB); 5852 // Emit the code 5853 SDL->visitBitTestHeader(SDL->BitTestCases[i]); 5854 CurDAG->setRoot(SDL->getRoot()); 5855 CodeGenAndEmitDAG(); 5856 SDL->clear(); 5857 } 5858 5859 for (unsigned j = 0, ej = SDL->BitTestCases[i].Cases.size(); j != ej; ++j) { 5860 // Set the current basic block to the mbb we wish to insert the code into 5861 BB = SDL->BitTestCases[i].Cases[j].ThisBB; 5862 SDL->setCurrentBasicBlock(BB); 5863 // Emit the code 5864 if (j+1 != ej) 5865 SDL->visitBitTestCase(SDL->BitTestCases[i].Cases[j+1].ThisBB, 5866 SDL->BitTestCases[i].Reg, 5867 SDL->BitTestCases[i].Cases[j]); 5868 else 5869 SDL->visitBitTestCase(SDL->BitTestCases[i].Default, 5870 SDL->BitTestCases[i].Reg, 5871 SDL->BitTestCases[i].Cases[j]); 5872 5873 5874 CurDAG->setRoot(SDL->getRoot()); 5875 CodeGenAndEmitDAG(); 5876 SDL->clear(); 5877 } 5878 5879 // Update PHI Nodes 5880 for (unsigned pi = 0, pe = SDL->PHINodesToUpdate.size(); pi != pe; ++pi) { 5881 MachineInstr *PHI = SDL->PHINodesToUpdate[pi].first; 5882 MachineBasicBlock *PHIBB = PHI->getParent(); 5883 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 5884 "This 
is not a machine PHI node that we are updating!"); 5885 // This is "default" BB. We have two jumps to it. From "header" BB and 5886 // from last "case" BB. 5887 if (PHIBB == SDL->BitTestCases[i].Default) { 5888 PHI->addOperand(MachineOperand::CreateReg(SDL->PHINodesToUpdate[pi].second, 5889 false)); 5890 PHI->addOperand(MachineOperand::CreateMBB(SDL->BitTestCases[i].Parent)); 5891 PHI->addOperand(MachineOperand::CreateReg(SDL->PHINodesToUpdate[pi].second, 5892 false)); 5893 PHI->addOperand(MachineOperand::CreateMBB(SDL->BitTestCases[i].Cases. 5894 back().ThisBB)); 5895 } 5896 // One of "cases" BB. 5897 for (unsigned j = 0, ej = SDL->BitTestCases[i].Cases.size(); 5898 j != ej; ++j) { 5899 MachineBasicBlock* cBB = SDL->BitTestCases[i].Cases[j].ThisBB; 5900 if (cBB->succ_end() != 5901 std::find(cBB->succ_begin(),cBB->succ_end(), PHIBB)) { 5902 PHI->addOperand(MachineOperand::CreateReg(SDL->PHINodesToUpdate[pi].second, 5903 false)); 5904 PHI->addOperand(MachineOperand::CreateMBB(cBB)); 5905 } 5906 } 5907 } 5908 } 5909 SDL->BitTestCases.clear(); 5910 5911 // If the JumpTable record is filled in, then we need to emit a jump table. 
5912 // Updating the PHI nodes is tricky in this case, since we need to determine 5913 // whether the PHI is a successor of the range check MBB or the jump table MBB 5914 for (unsigned i = 0, e = SDL->JTCases.size(); i != e; ++i) { 5915 // Lower header first, if it wasn't already lowered 5916 if (!SDL->JTCases[i].first.Emitted) { 5917 // Set the current basic block to the mbb we wish to insert the code into 5918 BB = SDL->JTCases[i].first.HeaderBB; 5919 SDL->setCurrentBasicBlock(BB); 5920 // Emit the code 5921 SDL->visitJumpTableHeader(SDL->JTCases[i].second, SDL->JTCases[i].first); 5922 CurDAG->setRoot(SDL->getRoot()); 5923 CodeGenAndEmitDAG(); 5924 SDL->clear(); 5925 } 5926 5927 // Set the current basic block to the mbb we wish to insert the code into 5928 BB = SDL->JTCases[i].second.MBB; 5929 SDL->setCurrentBasicBlock(BB); 5930 // Emit the code 5931 SDL->visitJumpTable(SDL->JTCases[i].second); 5932 CurDAG->setRoot(SDL->getRoot()); 5933 CodeGenAndEmitDAG(); 5934 SDL->clear(); 5935 5936 // Update PHI Nodes 5937 for (unsigned pi = 0, pe = SDL->PHINodesToUpdate.size(); pi != pe; ++pi) { 5938 MachineInstr *PHI = SDL->PHINodesToUpdate[pi].first; 5939 MachineBasicBlock *PHIBB = PHI->getParent(); 5940 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 5941 "This is not a machine PHI node that we are updating!"); 5942 // "default" BB. We can go there only from header BB. 5943 if (PHIBB == SDL->JTCases[i].second.Default) { 5944 PHI->addOperand(MachineOperand::CreateReg(SDL->PHINodesToUpdate[pi].second, 5945 false)); 5946 PHI->addOperand(MachineOperand::CreateMBB(SDL->JTCases[i].first.HeaderBB)); 5947 } 5948 // JT BB. 
Just iterate over successors here 5949 if (BB->succ_end() != std::find(BB->succ_begin(),BB->succ_end(), PHIBB)) { 5950 PHI->addOperand(MachineOperand::CreateReg(SDL->PHINodesToUpdate[pi].second, 5951 false)); 5952 PHI->addOperand(MachineOperand::CreateMBB(BB)); 5953 } 5954 } 5955 } 5956 SDL->JTCases.clear(); 5957 5958 // If the switch block involved a branch to one of the actual successors, we 5959 // need to update PHI nodes in that block. 5960 for (unsigned i = 0, e = SDL->PHINodesToUpdate.size(); i != e; ++i) { 5961 MachineInstr *PHI = SDL->PHINodesToUpdate[i].first; 5962 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 5963 "This is not a machine PHI node that we are updating!"); 5964 if (BB->isSuccessor(PHI->getParent())) { 5965 PHI->addOperand(MachineOperand::CreateReg(SDL->PHINodesToUpdate[i].second, 5966 false)); 5967 PHI->addOperand(MachineOperand::CreateMBB(BB)); 5968 } 5969 } 5970 5971 // If we generated any switch lowering information, build and codegen any 5972 // additional DAGs necessary. 5973 for (unsigned i = 0, e = SDL->SwitchCases.size(); i != e; ++i) { 5974 // Set the current basic block to the mbb we wish to insert the code into 5975 BB = SDL->SwitchCases[i].ThisBB; 5976 SDL->setCurrentBasicBlock(BB); 5977 5978 // Emit the code 5979 SDL->visitSwitchCase(SDL->SwitchCases[i]); 5980 CurDAG->setRoot(SDL->getRoot()); 5981 CodeGenAndEmitDAG(); 5982 SDL->clear(); 5983 5984 // Handle any PHI nodes in successors of this chunk, as if we were coming 5985 // from the original BB before switch expansion. Note that PHI nodes can 5986 // occur multiple times in PHINodesToUpdate. We have to be very careful to 5987 // handle them the right number of times. 5988 while ((BB = SDL->SwitchCases[i].TrueBB)) { // Handle LHS and RHS. 5989 for (MachineBasicBlock::iterator Phi = BB->begin(); 5990 Phi != BB->end() && Phi->getOpcode() == TargetInstrInfo::PHI; ++Phi){ 5991 // This value for this PHI node is recorded in PHINodesToUpdate, get it. 
5992 for (unsigned pn = 0; ; ++pn) { 5993 assert(pn != SDL->PHINodesToUpdate.size() && 5994 "Didn't find PHI entry!"); 5995 if (SDL->PHINodesToUpdate[pn].first == Phi) { 5996 Phi->addOperand(MachineOperand::CreateReg(SDL->PHINodesToUpdate[pn]. 5997 second, false)); 5998 Phi->addOperand(MachineOperand::CreateMBB(SDL->SwitchCases[i].ThisBB)); 5999 break; 6000 } 6001 } 6002 } 6003 6004 // Don't process RHS if same block as LHS. 6005 if (BB == SDL->SwitchCases[i].FalseBB) 6006 SDL->SwitchCases[i].FalseBB = 0; 6007 6008 // If we haven't handled the RHS, do so now. Otherwise, we're done. 6009 SDL->SwitchCases[i].TrueBB = SDL->SwitchCases[i].FalseBB; 6010 SDL->SwitchCases[i].FalseBB = 0; 6011 } 6012 assert(SDL->SwitchCases[i].TrueBB == 0 && SDL->SwitchCases[i].FalseBB == 0); 6013 } 6014 SDL->SwitchCases.clear(); 6015 6016 SDL->PHINodesToUpdate.clear(); 6017} 6018 6019 6020/// Schedule - Pick a safe ordering for instructions for each 6021/// target node in the graph. 6022/// 6023ScheduleDAG *SelectionDAGISel::Schedule() { 6024 RegisterScheduler::FunctionPassCtor Ctor = RegisterScheduler::getDefault(); 6025 6026 if (!Ctor) { 6027 Ctor = ISHeuristic; 6028 RegisterScheduler::setDefault(Ctor); 6029 } 6030 6031 ScheduleDAG *Scheduler = Ctor(this, CurDAG, BB, Fast); 6032 Scheduler->Run(); 6033 6034 return Scheduler; 6035} 6036 6037 6038HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() { 6039 return new HazardRecognizer(); 6040} 6041 6042//===----------------------------------------------------------------------===// 6043// Helper functions used by the generated instruction selector. 6044//===----------------------------------------------------------------------===// 6045// Calls to these methods are generated by tblgen. 6046 6047/// CheckAndMask - The isel is trying to match something like (and X, 255). If 6048/// the dag combiner simplified the 255, we still want to match. 
RHS is the 6049/// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value 6050/// specified in the .td file (e.g. 255). 6051bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS, 6052 int64_t DesiredMaskS) const { 6053 const APInt &ActualMask = RHS->getAPIntValue(); 6054 const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS); 6055 6056 // If the actual mask exactly matches, success! 6057 if (ActualMask == DesiredMask) 6058 return true; 6059 6060 // If the actual AND mask is allowing unallowed bits, this doesn't match. 6061 if (ActualMask.intersects(~DesiredMask)) 6062 return false; 6063 6064 // Otherwise, the DAG Combiner may have proven that the value coming in is 6065 // either already zero or is not demanded. Check for known zero input bits. 6066 APInt NeededMask = DesiredMask & ~ActualMask; 6067 if (CurDAG->MaskedValueIsZero(LHS, NeededMask)) 6068 return true; 6069 6070 // TODO: check to see if missing bits are just not demanded. 6071 6072 // Otherwise, this pattern doesn't match. 6073 return false; 6074} 6075 6076/// CheckOrMask - The isel is trying to match something like (or X, 255). If 6077/// the dag combiner simplified the 255, we still want to match. RHS is the 6078/// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value 6079/// specified in the .td file (e.g. 255). 6080bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS, 6081 int64_t DesiredMaskS) const { 6082 const APInt &ActualMask = RHS->getAPIntValue(); 6083 const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS); 6084 6085 // If the actual mask exactly matches, success! 6086 if (ActualMask == DesiredMask) 6087 return true; 6088 6089 // If the actual AND mask is allowing unallowed bits, this doesn't match. 
6090 if (ActualMask.intersects(~DesiredMask)) 6091 return false; 6092 6093 // Otherwise, the DAG Combiner may have proven that the value coming in is 6094 // either already zero or is not demanded. Check for known zero input bits. 6095 APInt NeededMask = DesiredMask & ~ActualMask; 6096 6097 APInt KnownZero, KnownOne; 6098 CurDAG->ComputeMaskedBits(LHS, NeededMask, KnownZero, KnownOne); 6099 6100 // If all the missing bits in the or are already known to be set, match! 6101 if ((NeededMask & KnownOne) == NeededMask) 6102 return true; 6103 6104 // TODO: check to see if missing bits are just not demanded. 6105 6106 // Otherwise, this pattern doesn't match. 6107 return false; 6108} 6109 6110 6111/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated 6112/// by tblgen. Others should not call it. 6113void SelectionDAGISel:: 6114SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops) { 6115 std::vector<SDValue> InOps; 6116 std::swap(InOps, Ops); 6117 6118 Ops.push_back(InOps[0]); // input chain. 6119 Ops.push_back(InOps[1]); // input asm string. 6120 6121 unsigned i = 2, e = InOps.size(); 6122 if (InOps[e-1].getValueType() == MVT::Flag) 6123 --e; // Don't process a flag operand if it is here. 6124 6125 while (i != e) { 6126 unsigned Flags = cast<ConstantSDNode>(InOps[i])->getValue(); 6127 if ((Flags & 7) != 4 /*MEM*/) { 6128 // Just skip over this operand, copying the operands verbatim. 6129 Ops.insert(Ops.end(), InOps.begin()+i, InOps.begin()+i+(Flags >> 3) + 1); 6130 i += (Flags >> 3) + 1; 6131 } else { 6132 assert((Flags >> 3) == 1 && "Memory operand with multiple values?"); 6133 // Otherwise, this is a memory operand. Ask the target to select it. 6134 std::vector<SDValue> SelOps; 6135 if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps)) { 6136 cerr << "Could not match memory address. Inline asm failure!\n"; 6137 exit(1); 6138 } 6139 6140 // Add this to the output node. 
6141 MVT IntPtrTy = CurDAG->getTargetLoweringInfo().getPointerTy(); 6142 Ops.push_back(CurDAG->getTargetConstant(4/*MEM*/ | (SelOps.size() << 3), 6143 IntPtrTy)); 6144 Ops.insert(Ops.end(), SelOps.begin(), SelOps.end()); 6145 i += 2; 6146 } 6147 } 6148 6149 // Add the flag input back if present. 6150 if (e != InOps.size()) 6151 Ops.push_back(InOps.back()); 6152} 6153 6154char SelectionDAGISel::ID = 0; 6155