SelectionDAGISel.cpp revision 693163e74d047a05799eaaf619ba4972fdc3a0e9
1//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file was developed by the LLVM research group and is distributed under 6// the University of Illinois Open Source License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This implements the SelectionDAGISel class. 11// 12//===----------------------------------------------------------------------===// 13 14#define DEBUG_TYPE "isel" 15#include "llvm/CodeGen/SelectionDAGISel.h" 16#include "llvm/CodeGen/ScheduleDAG.h" 17#include "llvm/CallingConv.h" 18#include "llvm/Constants.h" 19#include "llvm/DerivedTypes.h" 20#include "llvm/Function.h" 21#include "llvm/GlobalVariable.h" 22#include "llvm/InlineAsm.h" 23#include "llvm/Instructions.h" 24#include "llvm/Intrinsics.h" 25#include "llvm/IntrinsicInst.h" 26#include "llvm/CodeGen/IntrinsicLowering.h" 27#include "llvm/CodeGen/MachineDebugInfo.h" 28#include "llvm/CodeGen/MachineFunction.h" 29#include "llvm/CodeGen/MachineFrameInfo.h" 30#include "llvm/CodeGen/MachineJumpTableInfo.h" 31#include "llvm/CodeGen/MachineInstrBuilder.h" 32#include "llvm/CodeGen/SchedulerRegistry.h" 33#include "llvm/CodeGen/SelectionDAG.h" 34#include "llvm/CodeGen/SSARegMap.h" 35#include "llvm/Target/MRegisterInfo.h" 36#include "llvm/Target/TargetData.h" 37#include "llvm/Target/TargetFrameInfo.h" 38#include "llvm/Target/TargetInstrInfo.h" 39#include "llvm/Target/TargetLowering.h" 40#include "llvm/Target/TargetMachine.h" 41#include "llvm/Target/TargetOptions.h" 42#include "llvm/Transforms/Utils/BasicBlockUtils.h" 43#include "llvm/Support/MathExtras.h" 44#include "llvm/Support/Debug.h" 45#include "llvm/Support/Compiler.h" 46#include <map> 47#include <set> 48#include <iostream> 49#include <algorithm> 50using namespace llvm; 51 52#ifndef NDEBUG 53static cl::opt<bool> 54ViewISelDAGs("view-isel-dags", cl::Hidden, 55 cl::desc("Pop up a window to show isel dags as they are selected")); 56static cl::opt<bool> 57ViewSchedDAGs("view-sched-dags", cl::Hidden, 58 cl::desc("Pop up a window to show sched dags as they are processed")); 59#else 60static const bool ViewISelDAGs = 0, ViewSchedDAGs = 0; 61#endif 62 63 64//===---------------------------------------------------------------------===// 65/// 66/// RegisterScheduler class - Track the registration of instruction schedulers. 67/// 68//===---------------------------------------------------------------------===// 69MachinePassRegistry RegisterScheduler::Registry; 70 71//===---------------------------------------------------------------------===// 72/// 73/// ISHeuristic command line option for instruction schedulers. 74/// 75//===---------------------------------------------------------------------===// 76namespace { 77 cl::opt<RegisterScheduler::FunctionPassCtor, false, 78 RegisterPassParser<RegisterScheduler> > 79 ISHeuristic("sched", 80 cl::init(&createDefaultScheduler), 81 cl::desc("Instruction schedulers available:")); 82 83 static RegisterScheduler 84 defaultListDAGScheduler("default", " Best scheduler for the target", 85 createDefaultScheduler); 86} // namespace 87 88namespace { 89 /// RegsForValue - This struct represents the physical registers that a 90 /// particular value is assigned and the type information about the value. 91 /// This is needed because values can be promoted into larger registers and 92 /// expanded into multiple smaller registers than the value. 
93 struct VISIBILITY_HIDDEN RegsForValue { 94 /// Regs - This list hold the register (for legal and promoted values) 95 /// or register set (for expanded values) that the value should be assigned 96 /// to. 97 std::vector<unsigned> Regs; 98 99 /// RegVT - The value type of each register. 100 /// 101 MVT::ValueType RegVT; 102 103 /// ValueVT - The value type of the LLVM value, which may be promoted from 104 /// RegVT or made from merging the two expanded parts. 105 MVT::ValueType ValueVT; 106 107 RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {} 108 109 RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt) 110 : RegVT(regvt), ValueVT(valuevt) { 111 Regs.push_back(Reg); 112 } 113 RegsForValue(const std::vector<unsigned> ®s, 114 MVT::ValueType regvt, MVT::ValueType valuevt) 115 : Regs(regs), RegVT(regvt), ValueVT(valuevt) { 116 } 117 118 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from 119 /// this value and returns the result as a ValueVT value. This uses 120 /// Chain/Flag as the input and updates them for the output Chain/Flag. 121 SDOperand getCopyFromRegs(SelectionDAG &DAG, 122 SDOperand &Chain, SDOperand &Flag) const; 123 124 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the 125 /// specified value into the registers specified by this object. This uses 126 /// Chain/Flag as the input and updates them for the output Chain/Flag. 127 void getCopyToRegs(SDOperand Val, SelectionDAG &DAG, 128 SDOperand &Chain, SDOperand &Flag, 129 MVT::ValueType PtrVT) const; 130 131 /// AddInlineAsmOperands - Add this value to the specified inlineasm node 132 /// operand list. This adds the code marker and includes the number of 133 /// values added into it. 134 void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG, 135 std::vector<SDOperand> &Ops) const; 136 }; 137} 138 139namespace llvm { 140 //===--------------------------------------------------------------------===// 141 /// createDefaultScheduler - This creates an instruction scheduler appropriate 142 /// for the target. 143 ScheduleDAG* createDefaultScheduler(SelectionDAGISel *IS, 144 SelectionDAG *DAG, 145 MachineBasicBlock *BB) { 146 TargetLowering &TLI = IS->getTargetLowering(); 147 148 if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency) { 149 return createTDListDAGScheduler(IS, DAG, BB); 150 } else { 151 assert(TLI.getSchedulingPreference() == 152 TargetLowering::SchedulingForRegPressure && "Unknown sched type!"); 153 return createBURRListDAGScheduler(IS, DAG, BB); 154 } 155 } 156 157 158 //===--------------------------------------------------------------------===// 159 /// FunctionLoweringInfo - This contains information that is global to a 160 /// function that is used when lowering a region of the function. 161 class FunctionLoweringInfo { 162 public: 163 TargetLowering &TLI; 164 Function &Fn; 165 MachineFunction &MF; 166 SSARegMap *RegMap; 167 168 FunctionLoweringInfo(TargetLowering &TLI, Function &Fn,MachineFunction &MF); 169 170 /// MBBMap - A mapping from LLVM basic blocks to their machine code entry. 171 std::map<const BasicBlock*, MachineBasicBlock *> MBBMap; 172 173 /// ValueMap - Since we emit code for the function a basic block at a time, 174 /// we must remember which virtual registers hold the values for 175 /// cross-basic-block values. 176 std::map<const Value*, unsigned> ValueMap; 177 178 /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in 179 /// the entry block. 
This allows the allocas to be efficiently referenced 180 /// anywhere in the function. 181 std::map<const AllocaInst*, int> StaticAllocaMap; 182 183 unsigned MakeReg(MVT::ValueType VT) { 184 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT)); 185 } 186 187 unsigned CreateRegForValue(const Value *V); 188 189 unsigned InitializeRegForValue(const Value *V) { 190 unsigned &R = ValueMap[V]; 191 assert(R == 0 && "Already initialized this value register!"); 192 return R = CreateRegForValue(V); 193 } 194 }; 195} 196 197/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by 198/// PHI nodes or outside of the basic block that defines it, or used by a 199/// switch instruction, which may expand to multiple basic blocks. 200static bool isUsedOutsideOfDefiningBlock(Instruction *I) { 201 if (isa<PHINode>(I)) return true; 202 BasicBlock *BB = I->getParent(); 203 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI) 204 if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) || 205 isa<SwitchInst>(*UI)) 206 return true; 207 return false; 208} 209 210/// isOnlyUsedInEntryBlock - If the specified argument is only used in the 211/// entry block, return true. This includes arguments used by switches, since 212/// the switch may expand into multiple basic blocks. 213static bool isOnlyUsedInEntryBlock(Argument *A) { 214 BasicBlock *Entry = A->getParent()->begin(); 215 for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI) 216 if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI)) 217 return false; // Use not in entry block. 218 return true; 219} 220 221FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli, 222 Function &fn, MachineFunction &mf) 223 : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) { 224 225 // Create a vreg for each argument register that is not dead and is used 226 // outside of the entry block for the function. 227 for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end(); 228 AI != E; ++AI) 229 if (!isOnlyUsedInEntryBlock(AI)) 230 InitializeRegForValue(AI); 231 232 // Initialize the mapping of values to registers. This is only set up for 233 // instruction values that are used outside of the block that defines 234 // them. 235 Function::iterator BB = Fn.begin(), EB = Fn.end(); 236 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) 237 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) 238 if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) { 239 const Type *Ty = AI->getAllocatedType(); 240 uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty); 241 unsigned Align = 242 std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty), 243 AI->getAlignment()); 244 245 // If the alignment of the value is smaller than the size of the value, 246 // and if the size of the value is particularly small (<= 8 bytes), 247 // round up to the size of the value for potentially better performance. 248 // 249 // FIXME: This could be made better with a preferred alignment hook in 250 // TargetData. It serves primarily to 8-byte align doubles for X86. 251 if (Align < TySize && TySize <= 8) Align = TySize; 252 TySize *= CUI->getValue(); // Get total allocated size. 253 if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects. 
254 StaticAllocaMap[AI] = 255 MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align); 256 } 257 258 for (; BB != EB; ++BB) 259 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) 260 if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I)) 261 if (!isa<AllocaInst>(I) || 262 !StaticAllocaMap.count(cast<AllocaInst>(I))) 263 InitializeRegForValue(I); 264 265 // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This 266 // also creates the initial PHI MachineInstrs, though none of the input 267 // operands are populated. 268 for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) { 269 MachineBasicBlock *MBB = new MachineBasicBlock(BB); 270 MBBMap[BB] = MBB; 271 MF.getBasicBlockList().push_back(MBB); 272 273 // Create Machine PHI nodes for LLVM PHI nodes, lowering them as 274 // appropriate. 275 PHINode *PN; 276 for (BasicBlock::iterator I = BB->begin(); 277 (PN = dyn_cast<PHINode>(I)); ++I) 278 if (!PN->use_empty()) { 279 MVT::ValueType VT = TLI.getValueType(PN->getType()); 280 unsigned NumElements; 281 if (VT != MVT::Vector) 282 NumElements = TLI.getNumElements(VT); 283 else { 284 MVT::ValueType VT1,VT2; 285 NumElements = 286 TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()), 287 VT1, VT2); 288 } 289 unsigned PHIReg = ValueMap[PN]; 290 assert(PHIReg &&"PHI node does not have an assigned virtual register!"); 291 for (unsigned i = 0; i != NumElements; ++i) 292 BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i); 293 } 294 } 295} 296 297/// CreateRegForValue - Allocate the appropriate number of virtual registers of 298/// the correctly promoted or expanded types. Assign these registers 299/// consecutive vreg numbers and return the first assigned number. 300unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) { 301 MVT::ValueType VT = TLI.getValueType(V->getType()); 302 303 // The number of multiples of registers that we need, to, e.g., split up 304 // a <2 x int64> -> 4 x i32 registers. 305 unsigned NumVectorRegs = 1; 306 307 // If this is a packed type, figure out what type it will decompose into 308 // and how many of the elements it will use. 309 if (VT == MVT::Vector) { 310 const PackedType *PTy = cast<PackedType>(V->getType()); 311 unsigned NumElts = PTy->getNumElements(); 312 MVT::ValueType EltTy = TLI.getValueType(PTy->getElementType()); 313 314 // Divide the input until we get to a supported size. This will always 315 // end with a scalar if the target doesn't support vectors. 316 while (NumElts > 1 && !TLI.isTypeLegal(getVectorType(EltTy, NumElts))) { 317 NumElts >>= 1; 318 NumVectorRegs <<= 1; 319 } 320 if (NumElts == 1) 321 VT = EltTy; 322 else 323 VT = getVectorType(EltTy, NumElts); 324 } 325 326 // The common case is that we will only create one register for this 327 // value. If we have that case, create and return the virtual register. 328 unsigned NV = TLI.getNumElements(VT); 329 if (NV == 1) { 330 // If we are promoting this value, pick the next largest supported type. 331 MVT::ValueType PromotedType = TLI.getTypeToTransformTo(VT); 332 unsigned Reg = MakeReg(PromotedType); 333 // If this is a vector of supported or promoted types (e.g. 4 x i16), 334 // create all of the registers. 335 for (unsigned i = 1; i != NumVectorRegs; ++i) 336 MakeReg(PromotedType); 337 return Reg; 338 } 339 340 // If this value is represented with multiple target registers, make sure 341 // to create enough consecutive registers of the right (smaller) type. 342 unsigned NT = VT-1; // Find the type to use. 
343 while (TLI.getNumElements((MVT::ValueType)NT) != 1) 344 --NT; 345 346 unsigned R = MakeReg((MVT::ValueType)NT); 347 for (unsigned i = 1; i != NV*NumVectorRegs; ++i) 348 MakeReg((MVT::ValueType)NT); 349 return R; 350} 351 352//===----------------------------------------------------------------------===// 353/// SelectionDAGLowering - This is the common target-independent lowering 354/// implementation that is parameterized by a TargetLowering object. 355/// Also, targets can overload any lowering method. 356/// 357namespace llvm { 358class SelectionDAGLowering { 359 MachineBasicBlock *CurMBB; 360 361 std::map<const Value*, SDOperand> NodeMap; 362 363 /// PendingLoads - Loads are not emitted to the program immediately. We bunch 364 /// them up and then emit token factor nodes when possible. This allows us to 365 /// get simple disambiguation between loads without worrying about alias 366 /// analysis. 367 std::vector<SDOperand> PendingLoads; 368 369 /// Case - A pair of values to record the Value for a switch case, and the 370 /// case's target basic block. 371 typedef std::pair<Constant*, MachineBasicBlock*> Case; 372 typedef std::vector<Case>::iterator CaseItr; 373 typedef std::pair<CaseItr, CaseItr> CaseRange; 374 375 /// CaseRec - A struct with ctor used in lowering switches to a binary tree 376 /// of conditional branches. 377 struct CaseRec { 378 CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) : 379 CaseBB(bb), LT(lt), GE(ge), Range(r) {} 380 381 /// CaseBB - The MBB in which to emit the compare and branch 382 MachineBasicBlock *CaseBB; 383 /// LT, GE - If nonzero, we know the current case value must be less-than or 384 /// greater-than-or-equal-to these Constants. 385 Constant *LT; 386 Constant *GE; 387 /// Range - A pair of iterators representing the range of case values to be 388 /// processed at this point in the binary search tree. 389 CaseRange Range; 390 }; 391 392 /// The comparison function for sorting Case values. 393 struct CaseCmp { 394 bool operator () (const Case& C1, const Case& C2) { 395 if (const ConstantUInt* U1 = dyn_cast<const ConstantUInt>(C1.first)) 396 return U1->getValue() < cast<const ConstantUInt>(C2.first)->getValue(); 397 398 const ConstantSInt* S1 = dyn_cast<const ConstantSInt>(C1.first); 399 return S1->getValue() < cast<const ConstantSInt>(C2.first)->getValue(); 400 } 401 }; 402 403public: 404 // TLI - This is information that describes the available target features we 405 // need for lowering. This indicates when operations are unavailable, 406 // implemented with a libcall, etc. 407 TargetLowering &TLI; 408 SelectionDAG &DAG; 409 const TargetData *TD; 410 411 /// SwitchCases - Vector of CaseBlock structures used to communicate 412 /// SwitchInst code generation information. 413 std::vector<SelectionDAGISel::CaseBlock> SwitchCases; 414 SelectionDAGISel::JumpTable JT; 415 416 /// FuncInfo - Information about the function as a whole. 417 /// 418 FunctionLoweringInfo &FuncInfo; 419 420 SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli, 421 FunctionLoweringInfo &funcinfo) 422 : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()), 423 JT(0,0,0,0), FuncInfo(funcinfo) { 424 } 425 426 /// getRoot - Return the current virtual root of the Selection DAG. 
427 /// 428 SDOperand getRoot() { 429 if (PendingLoads.empty()) 430 return DAG.getRoot(); 431 432 if (PendingLoads.size() == 1) { 433 SDOperand Root = PendingLoads[0]; 434 DAG.setRoot(Root); 435 PendingLoads.clear(); 436 return Root; 437 } 438 439 // Otherwise, we have to make a token factor node. 440 SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other, 441 &PendingLoads[0], PendingLoads.size()); 442 PendingLoads.clear(); 443 DAG.setRoot(Root); 444 return Root; 445 } 446 447 void visit(Instruction &I) { visit(I.getOpcode(), I); } 448 449 void visit(unsigned Opcode, User &I) { 450 switch (Opcode) { 451 default: assert(0 && "Unknown instruction type encountered!"); 452 abort(); 453 // Build the switch statement using the Instruction.def file. 454#define HANDLE_INST(NUM, OPCODE, CLASS) \ 455 case Instruction::OPCODE:return visit##OPCODE((CLASS&)I); 456#include "llvm/Instruction.def" 457 } 458 } 459 460 void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; } 461 462 SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr, 463 SDOperand SrcValue, SDOperand Root, 464 bool isVolatile); 465 466 SDOperand getIntPtrConstant(uint64_t Val) { 467 return DAG.getConstant(Val, TLI.getPointerTy()); 468 } 469 470 SDOperand getValue(const Value *V); 471 472 const SDOperand &setValue(const Value *V, SDOperand NewN) { 473 SDOperand &N = NodeMap[V]; 474 assert(N.Val == 0 && "Already set a value for this node!"); 475 return N = NewN; 476 } 477 478 RegsForValue GetRegistersForValue(const std::string &ConstrCode, 479 MVT::ValueType VT, 480 bool OutReg, bool InReg, 481 std::set<unsigned> &OutputRegs, 482 std::set<unsigned> &InputRegs); 483 484 // Terminator instructions. 485 void visitRet(ReturnInst &I); 486 void visitBr(BranchInst &I); 487 void visitSwitch(SwitchInst &I); 488 void visitUnreachable(UnreachableInst &I) { /* noop */ } 489 490 // Helper for visitSwitch 491 void visitSwitchCase(SelectionDAGISel::CaseBlock &CB); 492 void visitJumpTable(SelectionDAGISel::JumpTable &JT); 493 494 // These all get lowered before this pass. 495 void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); } 496 void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); } 497 498 void visitBinary(User &I, unsigned IntOp, unsigned FPOp, unsigned VecOp); 499 void visitShift(User &I, unsigned Opcode); 500 void visitAdd(User &I) { 501 visitBinary(I, ISD::ADD, ISD::FADD, ISD::VADD); 502 } 503 void visitSub(User &I); 504 void visitMul(User &I) { 505 visitBinary(I, ISD::MUL, ISD::FMUL, ISD::VMUL); 506 } 507 void visitDiv(User &I) { 508 const Type *Ty = I.getType(); 509 visitBinary(I, 510 Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV, 511 Ty->isSigned() ? ISD::VSDIV : ISD::VUDIV); 512 } 513 void visitRem(User &I) { 514 const Type *Ty = I.getType(); 515 visitBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, ISD::FREM, 0); 516 } 517 void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, ISD::VAND); } 518 void visitOr (User &I) { visitBinary(I, ISD::OR, 0, ISD::VOR); } 519 void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, ISD::VXOR); } 520 void visitShl(User &I) { visitShift(I, ISD::SHL); } 521 void visitShr(User &I) { 522 visitShift(I, I.getType()->isUnsigned() ? 
ISD::SRL : ISD::SRA); 523 } 524 525 void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc, 526 ISD::CondCode FPOpc); 527 void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ, 528 ISD::SETOEQ); } 529 void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE, 530 ISD::SETUNE); } 531 void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE, 532 ISD::SETOLE); } 533 void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE, 534 ISD::SETOGE); } 535 void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT, 536 ISD::SETOLT); } 537 void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT, 538 ISD::SETOGT); } 539 540 void visitExtractElement(User &I); 541 void visitInsertElement(User &I); 542 void visitShuffleVector(User &I); 543 544 void visitGetElementPtr(User &I); 545 void visitCast(User &I); 546 void visitSelect(User &I); 547 548 void visitMalloc(MallocInst &I); 549 void visitFree(FreeInst &I); 550 void visitAlloca(AllocaInst &I); 551 void visitLoad(LoadInst &I); 552 void visitStore(StoreInst &I); 553 void visitPHI(PHINode &I) { } // PHI nodes are handled specially. 554 void visitCall(CallInst &I); 555 void visitInlineAsm(CallInst &I); 556 const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic); 557 void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic); 558 559 void visitVAStart(CallInst &I); 560 void visitVAArg(VAArgInst &I); 561 void visitVAEnd(CallInst &I); 562 void visitVACopy(CallInst &I); 563 void visitFrameReturnAddress(CallInst &I, bool isFrameAddress); 564 565 void visitMemIntrinsic(CallInst &I, unsigned Op); 566 567 void visitUserOp1(Instruction &I) { 568 assert(0 && "UserOp1 should not exist at instruction selection time!"); 569 abort(); 570 } 571 void visitUserOp2(Instruction &I) { 572 assert(0 && "UserOp2 should not exist at instruction selection time!"); 573 abort(); 574 } 575}; 576} // end namespace llvm 577 578SDOperand SelectionDAGLowering::getValue(const Value *V) { 579 SDOperand &N = NodeMap[V]; 580 if (N.Val) return N; 581 582 const Type *VTy = V->getType(); 583 MVT::ValueType VT = TLI.getValueType(VTy); 584 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) { 585 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { 586 visit(CE->getOpcode(), *CE); 587 assert(N.Val && "visit didn't populate the ValueMap!"); 588 return N; 589 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) { 590 return N = DAG.getGlobalAddress(GV, VT); 591 } else if (isa<ConstantPointerNull>(C)) { 592 return N = DAG.getConstant(0, TLI.getPointerTy()); 593 } else if (isa<UndefValue>(C)) { 594 if (!isa<PackedType>(VTy)) 595 return N = DAG.getNode(ISD::UNDEF, VT); 596 597 // Create a VBUILD_VECTOR of undef nodes. 598 const PackedType *PTy = cast<PackedType>(VTy); 599 unsigned NumElements = PTy->getNumElements(); 600 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType()); 601 602 SmallVector<SDOperand, 8> Ops; 603 Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT)); 604 605 // Create a VConstant node with generic Vector type. 
606 Ops.push_back(DAG.getConstant(NumElements, MVT::i32)); 607 Ops.push_back(DAG.getValueType(PVT)); 608 return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, 609 &Ops[0], Ops.size()); 610 } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) { 611 return N = DAG.getConstantFP(CFP->getValue(), VT); 612 } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) { 613 unsigned NumElements = PTy->getNumElements(); 614 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType()); 615 616 // Now that we know the number and type of the elements, push a 617 // Constant or ConstantFP node onto the ops list for each element of 618 // the packed constant. 619 SmallVector<SDOperand, 8> Ops; 620 if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) { 621 for (unsigned i = 0; i != NumElements; ++i) 622 Ops.push_back(getValue(CP->getOperand(i))); 623 } else { 624 assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!"); 625 SDOperand Op; 626 if (MVT::isFloatingPoint(PVT)) 627 Op = DAG.getConstantFP(0, PVT); 628 else 629 Op = DAG.getConstant(0, PVT); 630 Ops.assign(NumElements, Op); 631 } 632 633 // Create a VBUILD_VECTOR node with generic Vector type. 634 Ops.push_back(DAG.getConstant(NumElements, MVT::i32)); 635 Ops.push_back(DAG.getValueType(PVT)); 636 return N = DAG.getNode(ISD::VBUILD_VECTOR,MVT::Vector,&Ops[0],Ops.size()); 637 } else { 638 // Canonicalize all constant ints to be unsigned. 639 return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT); 640 } 641 } 642 643 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) { 644 std::map<const AllocaInst*, int>::iterator SI = 645 FuncInfo.StaticAllocaMap.find(AI); 646 if (SI != FuncInfo.StaticAllocaMap.end()) 647 return DAG.getFrameIndex(SI->second, TLI.getPointerTy()); 648 } 649 650 std::map<const Value*, unsigned>::const_iterator VMI = 651 FuncInfo.ValueMap.find(V); 652 assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!"); 653 654 unsigned InReg = VMI->second; 655 656 // If this type is not legal, make it so now. 657 if (VT != MVT::Vector) { 658 MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT); 659 660 N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT); 661 if (DestVT < VT) { 662 // Source must be expanded. This input value is actually coming from the 663 // register pair VMI->second and VMI->second+1. 664 N = DAG.getNode(ISD::BUILD_PAIR, VT, N, 665 DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT)); 666 } else if (DestVT > VT) { // Promotion case 667 if (MVT::isFloatingPoint(VT)) 668 N = DAG.getNode(ISD::FP_ROUND, VT, N); 669 else 670 N = DAG.getNode(ISD::TRUNCATE, VT, N); 671 } 672 } else { 673 // Otherwise, if this is a vector, make it available as a generic vector 674 // here. 675 MVT::ValueType PTyElementVT, PTyLegalElementVT; 676 const PackedType *PTy = cast<PackedType>(VTy); 677 unsigned NE = TLI.getPackedTypeBreakdown(PTy, PTyElementVT, 678 PTyLegalElementVT); 679 680 // Build a VBUILD_VECTOR with the input registers. 681 SmallVector<SDOperand, 8> Ops; 682 if (PTyElementVT == PTyLegalElementVT) { 683 // If the value types are legal, just VBUILD the CopyFromReg nodes. 684 for (unsigned i = 0; i != NE; ++i) 685 Ops.push_back(DAG.getCopyFromReg(DAG.getEntryNode(), InReg++, 686 PTyElementVT)); 687 } else if (PTyElementVT < PTyLegalElementVT) { 688 // If the register was promoted, use TRUNCATE of FP_ROUND as appropriate. 
689 for (unsigned i = 0; i != NE; ++i) { 690 SDOperand Op = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++, 691 PTyElementVT); 692 if (MVT::isFloatingPoint(PTyElementVT)) 693 Op = DAG.getNode(ISD::FP_ROUND, PTyElementVT, Op); 694 else 695 Op = DAG.getNode(ISD::TRUNCATE, PTyElementVT, Op); 696 Ops.push_back(Op); 697 } 698 } else { 699 // If the register was expanded, use BUILD_PAIR. 700 assert((NE & 1) == 0 && "Must expand into a multiple of 2 elements!"); 701 for (unsigned i = 0; i != NE/2; ++i) { 702 SDOperand Op0 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++, 703 PTyElementVT); 704 SDOperand Op1 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++, 705 PTyElementVT); 706 Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Op0, Op1)); 707 } 708 } 709 710 Ops.push_back(DAG.getConstant(NE, MVT::i32)); 711 Ops.push_back(DAG.getValueType(PTyLegalElementVT)); 712 N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, &Ops[0], Ops.size()); 713 714 // Finally, use a VBIT_CONVERT to make this available as the appropriate 715 // vector type. 716 N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N, 717 DAG.getConstant(PTy->getNumElements(), 718 MVT::i32), 719 DAG.getValueType(TLI.getValueType(PTy->getElementType()))); 720 } 721 722 return N; 723} 724 725 726void SelectionDAGLowering::visitRet(ReturnInst &I) { 727 if (I.getNumOperands() == 0) { 728 DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot())); 729 return; 730 } 731 SmallVector<SDOperand, 8> NewValues; 732 NewValues.push_back(getRoot()); 733 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) { 734 SDOperand RetOp = getValue(I.getOperand(i)); 735 bool isSigned = I.getOperand(i)->getType()->isSigned(); 736 737 // If this is an integer return value, we need to promote it ourselves to 738 // the full width of a register, since LegalizeOp will use ANY_EXTEND rather 739 // than sign/zero. 740 // FIXME: C calling convention requires the return type to be promoted to 741 // at least 32-bit. But this is not necessary for non-C calling conventions. 742 if (MVT::isInteger(RetOp.getValueType()) && 743 RetOp.getValueType() < MVT::i64) { 744 MVT::ValueType TmpVT; 745 if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote) 746 TmpVT = TLI.getTypeToTransformTo(MVT::i32); 747 else 748 TmpVT = MVT::i32; 749 750 if (isSigned) 751 RetOp = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, RetOp); 752 else 753 RetOp = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, RetOp); 754 } 755 NewValues.push_back(RetOp); 756 NewValues.push_back(DAG.getConstant(isSigned, MVT::i32)); 757 } 758 DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, 759 &NewValues[0], NewValues.size())); 760} 761 762void SelectionDAGLowering::visitBr(BranchInst &I) { 763 // Update machine-CFG edges. 764 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)]; 765 CurMBB->addSuccessor(Succ0MBB); 766 767 // Figure out which block is immediately after the current one. 768 MachineBasicBlock *NextBlock = 0; 769 MachineFunction::iterator BBI = CurMBB; 770 if (++BBI != CurMBB->getParent()->end()) 771 NextBlock = BBI; 772 773 if (I.isUnconditional()) { 774 // If this is not a fall-through branch, emit the branch. 775 if (Succ0MBB != NextBlock) 776 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(), 777 DAG.getBasicBlock(Succ0MBB))); 778 } else { 779 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)]; 780 CurMBB->addSuccessor(Succ1MBB); 781 782 SDOperand Cond = getValue(I.getCondition()); 783 if (Succ1MBB == NextBlock) { 784 // If the condition is false, fall through. 
This means we should branch 785 // if the condition is true to Succ #0. 786 DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), 787 Cond, DAG.getBasicBlock(Succ0MBB))); 788 } else if (Succ0MBB == NextBlock) { 789 // If the condition is true, fall through. This means we should branch if 790 // the condition is false to Succ #1. Invert the condition first. 791 SDOperand True = DAG.getConstant(1, Cond.getValueType()); 792 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True); 793 DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), 794 Cond, DAG.getBasicBlock(Succ1MBB))); 795 } else { 796 std::vector<SDOperand> Ops; 797 Ops.push_back(getRoot()); 798 // If the false case is the current basic block, then this is a self 799 // loop. We do not want to emit "Loop: ... brcond Out; br Loop", as it 800 // adds an extra instruction in the loop. Instead, invert the 801 // condition and emit "Loop: ... br!cond Loop; br Out. 802 if (CurMBB == Succ1MBB) { 803 std::swap(Succ0MBB, Succ1MBB); 804 SDOperand True = DAG.getConstant(1, Cond.getValueType()); 805 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True); 806 } 807 SDOperand True = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond, 808 DAG.getBasicBlock(Succ0MBB)); 809 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, True, 810 DAG.getBasicBlock(Succ1MBB))); 811 } 812 } 813} 814 815/// visitSwitchCase - Emits the necessary code to represent a single node in 816/// the binary search tree resulting from lowering a switch instruction. 817void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) { 818 SDOperand SwitchOp = getValue(CB.SwitchV); 819 SDOperand CaseOp = getValue(CB.CaseC); 820 SDOperand Cond = DAG.getSetCC(MVT::i1, SwitchOp, CaseOp, CB.CC); 821 822 // Set NextBlock to be the MBB immediately after the current one, if any. 823 // This is used to avoid emitting unnecessary branches to the next block. 824 MachineBasicBlock *NextBlock = 0; 825 MachineFunction::iterator BBI = CurMBB; 826 if (++BBI != CurMBB->getParent()->end()) 827 NextBlock = BBI; 828 829 // If the lhs block is the next block, invert the condition so that we can 830 // fall through to the lhs instead of the rhs block. 831 if (CB.LHSBB == NextBlock) { 832 std::swap(CB.LHSBB, CB.RHSBB); 833 SDOperand True = DAG.getConstant(1, Cond.getValueType()); 834 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True); 835 } 836 SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond, 837 DAG.getBasicBlock(CB.LHSBB)); 838 if (CB.RHSBB == NextBlock) 839 DAG.setRoot(BrCond); 840 else 841 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond, 842 DAG.getBasicBlock(CB.RHSBB))); 843 // Update successor info 844 CurMBB->addSuccessor(CB.LHSBB); 845 CurMBB->addSuccessor(CB.RHSBB); 846} 847 848void SelectionDAGLowering::visitJumpTable(SelectionDAGISel::JumpTable &JT) { 849 // FIXME: Need to emit different code for PIC vs. Non-PIC, specifically, 850 // we need to add the address of the jump table to the value loaded, since 851 // the entries in the jump table will be differences rather than absolute 852 // addresses. 853 854 // Emit the code for the jump table 855 MVT::ValueType PTy = TLI.getPointerTy(); 856 assert((PTy == MVT::i32 || PTy == MVT::i64) && 857 "Jump table entries are 32-bit values"); 858 bool isPIC = TLI.getTargetMachine().getRelocationModel() == Reloc::PIC_; 859 // PIC jump table entries are 32-bit values. 860 unsigned EntrySize = isPIC ? 
4 : MVT::getSizeInBits(PTy)/8; 861 SDOperand Copy = DAG.getCopyFromReg(getRoot(), JT.Reg, PTy); 862 SDOperand IDX = DAG.getNode(ISD::MUL, PTy, Copy, 863 DAG.getConstant(EntrySize, PTy)); 864 SDOperand TAB = DAG.getJumpTable(JT.JTI,PTy); 865 SDOperand ADD = DAG.getNode(ISD::ADD, PTy, IDX, TAB); 866 SDOperand LD = DAG.getLoad(isPIC ? MVT::i32 : PTy, Copy.getValue(1), ADD, 867 DAG.getSrcValue(0)); 868 if (isPIC) { 869 // For Pic, the sequence is: 870 // BRIND(load(Jumptable + index) + RelocBase) 871 // RelocBase is the JumpTable on PPC and X86, GOT on Alpha 872 SDOperand Reloc = DAG.getNode(ISD::JumpTableRelocBase, PTy, TAB); 873 ADD = DAG.getNode(ISD::ADD, PTy, 874 ((PTy != MVT::i32) ? DAG.getNode(ISD::SIGN_EXTEND, PTy, LD) : LD), Reloc); 875 DAG.setRoot(DAG.getNode(ISD::BRIND, MVT::Other, LD.getValue(1), ADD)); 876 } else { 877 DAG.setRoot(DAG.getNode(ISD::BRIND, MVT::Other, LD.getValue(1), LD)); 878 } 879} 880 881void SelectionDAGLowering::visitSwitch(SwitchInst &I) { 882 // Figure out which block is immediately after the current one. 883 MachineBasicBlock *NextBlock = 0; 884 MachineFunction::iterator BBI = CurMBB; 885 if (++BBI != CurMBB->getParent()->end()) 886 NextBlock = BBI; 887 888 // If there is only the default destination, branch to it if it is not the 889 // next basic block. Otherwise, just fall through. 890 if (I.getNumOperands() == 2) { 891 // Update machine-CFG edges. 892 MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[I.getDefaultDest()]; 893 // If this is not a fall-through branch, emit the branch. 894 if (DefaultMBB != NextBlock) 895 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(), 896 DAG.getBasicBlock(DefaultMBB))); 897 CurMBB->addSuccessor(DefaultMBB); 898 return; 899 } 900 901 // If there are any non-default case statements, create a vector of Cases 902 // representing each one, and sort the vector so that we can efficiently 903 // create a binary search tree from them. 904 std::vector<Case> Cases; 905 for (unsigned i = 1; i < I.getNumSuccessors(); ++i) { 906 MachineBasicBlock *SMBB = FuncInfo.MBBMap[I.getSuccessor(i)]; 907 Cases.push_back(Case(I.getSuccessorValue(i), SMBB)); 908 } 909 std::sort(Cases.begin(), Cases.end(), CaseCmp()); 910 911 // Get the Value to be switched on and default basic blocks, which will be 912 // inserted into CaseBlock records, representing basic blocks in the binary 913 // search tree. 914 Value *SV = I.getOperand(0); 915 MachineBasicBlock *Default = FuncInfo.MBBMap[I.getDefaultDest()]; 916 917 // Get the MachineFunction which holds the current MBB. This is used during 918 // emission of jump tables, and when inserting any additional MBBs necessary 919 // to represent the switch. 920 MachineFunction *CurMF = CurMBB->getParent(); 921 const BasicBlock *LLVMBB = CurMBB->getBasicBlock(); 922 923 // If the switch has more than 5 blocks, and at least 31.25% dense, and the 924 // target supports indirect branches, then emit a jump table rather than 925 // lowering the switch to a binary tree of conditional branches. 926 if (TLI.isOperationLegal(ISD::BRIND, TLI.getPointerTy()) && 927 Cases.size() > 5) { 928 uint64_t First = cast<ConstantIntegral>(Cases.front().first)->getRawValue(); 929 uint64_t Last = cast<ConstantIntegral>(Cases.back().first)->getRawValue(); 930 double Density = (double)Cases.size() / (double)((Last - First) + 1ULL); 931 932 if (Density >= 0.3125) { 933 // Create a new basic block to hold the code for loading the address 934 // of the jump table, and jumping to it. 
Update successor information; 935 // we will either branch to the default case for the switch, or the jump 936 // table. 937 MachineBasicBlock *JumpTableBB = new MachineBasicBlock(LLVMBB); 938 CurMF->getBasicBlockList().insert(BBI, JumpTableBB); 939 CurMBB->addSuccessor(Default); 940 CurMBB->addSuccessor(JumpTableBB); 941 942 // Subtract the lowest switch case value from the value being switched on 943 // and conditional branch to default mbb if the result is greater than the 944 // difference between smallest and largest cases. 945 SDOperand SwitchOp = getValue(SV); 946 MVT::ValueType VT = SwitchOp.getValueType(); 947 SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp, 948 DAG.getConstant(First, VT)); 949 950 // The SDNode we just created, which holds the value being switched on 951 // minus the the smallest case value, needs to be copied to a virtual 952 // register so it can be used as an index into the jump table in a 953 // subsequent basic block. This value may be smaller or larger than the 954 // target's pointer type, and therefore require extension or truncating. 955 if (VT > TLI.getPointerTy()) 956 SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB); 957 else 958 SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB); 959 unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy()); 960 SDOperand CopyTo = DAG.getCopyToReg(getRoot(), JumpTableReg, SwitchOp); 961 962 // Emit the range check for the jump table, and branch to the default 963 // block for the switch statement if the value being switched on exceeds 964 // the largest case in the switch. 965 SDOperand CMP = DAG.getSetCC(TLI.getSetCCResultTy(), SUB, 966 DAG.getConstant(Last-First,VT), ISD::SETUGT); 967 DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP, 968 DAG.getBasicBlock(Default))); 969 970 // Build a vector of destination BBs, corresponding to each target 971 // of the jump table. If the value of the jump table slot corresponds to 972 // a case statement, push the case's BB onto the vector, otherwise, push 973 // the default BB. 974 std::vector<MachineBasicBlock*> DestBBs; 975 uint64_t TEI = First; 976 for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++TEI) { 977 if (cast<ConstantIntegral>(ii->first)->getRawValue() == TEI) { 978 DestBBs.push_back(ii->second); 979 ++ii; 980 } else { 981 DestBBs.push_back(Default); 982 } 983 } 984 985 // Update successor info 986 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(), 987 E = DestBBs.end(); I != E; ++I) 988 JumpTableBB->addSuccessor(*I); 989 990 // Create a jump table index for this jump table, or return an existing 991 // one. 992 unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs); 993 994 // Set the jump table information so that we can codegen it as a second 995 // MachineBasicBlock 996 JT.Reg = JumpTableReg; 997 JT.JTI = JTI; 998 JT.MBB = JumpTableBB; 999 JT.Default = Default; 1000 return; 1001 } 1002 } 1003 1004 // Push the initial CaseRec onto the worklist 1005 std::vector<CaseRec> CaseVec; 1006 CaseVec.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end()))); 1007 1008 while (!CaseVec.empty()) { 1009 // Grab a record representing a case range to process off the worklist 1010 CaseRec CR = CaseVec.back(); 1011 CaseVec.pop_back(); 1012 1013 // Size is the number of Cases represented by this range. If Size is 1, 1014 // then we are processing a leaf of the binary search tree. Otherwise, 1015 // we need to pick a pivot, and push left and right ranges onto the 1016 // worklist. 
1017 unsigned Size = CR.Range.second - CR.Range.first; 1018 1019 if (Size == 1) { 1020 // Create a CaseBlock record representing a conditional branch to 1021 // the Case's target mbb if the value being switched on SV is equal 1022 // to C. Otherwise, branch to default. 1023 Constant *C = CR.Range.first->first; 1024 MachineBasicBlock *Target = CR.Range.first->second; 1025 SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, C, Target, Default, 1026 CR.CaseBB); 1027 // If the MBB representing the leaf node is the current MBB, then just 1028 // call visitSwitchCase to emit the code into the current block. 1029 // Otherwise, push the CaseBlock onto the vector to be later processed 1030 // by SDISel, and insert the node's MBB before the next MBB. 1031 if (CR.CaseBB == CurMBB) 1032 visitSwitchCase(CB); 1033 else { 1034 SwitchCases.push_back(CB); 1035 CurMF->getBasicBlockList().insert(BBI, CR.CaseBB); 1036 } 1037 } else { 1038 // split case range at pivot 1039 CaseItr Pivot = CR.Range.first + (Size / 2); 1040 CaseRange LHSR(CR.Range.first, Pivot); 1041 CaseRange RHSR(Pivot, CR.Range.second); 1042 Constant *C = Pivot->first; 1043 MachineBasicBlock *RHSBB = 0, *LHSBB = 0; 1044 // We know that we branch to the LHS if the Value being switched on is 1045 // less than the Pivot value, C. We use this to optimize our binary 1046 // tree a bit, by recognizing that if SV is greater than or equal to the 1047 // LHS's Case Value, and that Case Value is exactly one less than the 1048 // Pivot's Value, then we can branch directly to the LHS's Target, 1049 // rather than creating a leaf node for it. 1050 if ((LHSR.second - LHSR.first) == 1 && 1051 LHSR.first->first == CR.GE && 1052 cast<ConstantIntegral>(C)->getRawValue() == 1053 (cast<ConstantIntegral>(CR.GE)->getRawValue() + 1ULL)) { 1054 LHSBB = LHSR.first->second; 1055 } else { 1056 LHSBB = new MachineBasicBlock(LLVMBB); 1057 CaseVec.push_back(CaseRec(LHSBB,C,CR.GE,LHSR)); 1058 } 1059 // Similar to the optimization above, if the Value being switched on is 1060 // known to be less than the Constant CR.LT, and the current Case Value 1061 // is CR.LT - 1, then we can branch directly to the target block for 1062 // the current Case Value, rather than emitting a RHS leaf node for it. 1063 if ((RHSR.second - RHSR.first) == 1 && CR.LT && 1064 cast<ConstantIntegral>(RHSR.first->first)->getRawValue() == 1065 (cast<ConstantIntegral>(CR.LT)->getRawValue() - 1ULL)) { 1066 RHSBB = RHSR.first->second; 1067 } else { 1068 RHSBB = new MachineBasicBlock(LLVMBB); 1069 CaseVec.push_back(CaseRec(RHSBB,CR.LT,C,RHSR)); 1070 } 1071 // Create a CaseBlock record representing a conditional branch to 1072 // the LHS node if the value being switched on SV is less than C. 1073 // Otherwise, branch to LHS. 1074 ISD::CondCode CC = C->getType()->isSigned() ? 
ISD::SETLT : ISD::SETULT; 1075 SelectionDAGISel::CaseBlock CB(CC, SV, C, LHSBB, RHSBB, CR.CaseBB); 1076 if (CR.CaseBB == CurMBB) 1077 visitSwitchCase(CB); 1078 else { 1079 SwitchCases.push_back(CB); 1080 CurMF->getBasicBlockList().insert(BBI, CR.CaseBB); 1081 } 1082 } 1083 } 1084} 1085 1086void SelectionDAGLowering::visitSub(User &I) { 1087 // -0.0 - X --> fneg 1088 if (I.getType()->isFloatingPoint()) { 1089 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0))) 1090 if (CFP->isExactlyValue(-0.0)) { 1091 SDOperand Op2 = getValue(I.getOperand(1)); 1092 setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2)); 1093 return; 1094 } 1095 } 1096 visitBinary(I, ISD::SUB, ISD::FSUB, ISD::VSUB); 1097} 1098 1099void SelectionDAGLowering::visitBinary(User &I, unsigned IntOp, unsigned FPOp, 1100 unsigned VecOp) { 1101 const Type *Ty = I.getType(); 1102 SDOperand Op1 = getValue(I.getOperand(0)); 1103 SDOperand Op2 = getValue(I.getOperand(1)); 1104 1105 if (Ty->isIntegral()) { 1106 setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2)); 1107 } else if (Ty->isFloatingPoint()) { 1108 setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2)); 1109 } else { 1110 const PackedType *PTy = cast<PackedType>(Ty); 1111 SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32); 1112 SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType())); 1113 setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ)); 1114 } 1115} 1116 1117void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) { 1118 SDOperand Op1 = getValue(I.getOperand(0)); 1119 SDOperand Op2 = getValue(I.getOperand(1)); 1120 1121 Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2); 1122 1123 setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2)); 1124} 1125 1126void SelectionDAGLowering::visitSetCC(User &I,ISD::CondCode SignedOpcode, 1127 ISD::CondCode UnsignedOpcode, 1128 ISD::CondCode FPOpcode) { 1129 SDOperand Op1 = getValue(I.getOperand(0)); 1130 SDOperand Op2 = getValue(I.getOperand(1)); 1131 ISD::CondCode Opcode = SignedOpcode; 1132 if (!FiniteOnlyFPMath() && I.getOperand(0)->getType()->isFloatingPoint()) 1133 Opcode = FPOpcode; 1134 else if (I.getOperand(0)->getType()->isUnsigned()) 1135 Opcode = UnsignedOpcode; 1136 setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode)); 1137} 1138 1139void SelectionDAGLowering::visitSelect(User &I) { 1140 SDOperand Cond = getValue(I.getOperand(0)); 1141 SDOperand TrueVal = getValue(I.getOperand(1)); 1142 SDOperand FalseVal = getValue(I.getOperand(2)); 1143 if (!isa<PackedType>(I.getType())) { 1144 setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond, 1145 TrueVal, FalseVal)); 1146 } else { 1147 setValue(&I, DAG.getNode(ISD::VSELECT, MVT::Vector, Cond, TrueVal, FalseVal, 1148 *(TrueVal.Val->op_end()-2), 1149 *(TrueVal.Val->op_end()-1))); 1150 } 1151} 1152 1153void SelectionDAGLowering::visitCast(User &I) { 1154 SDOperand N = getValue(I.getOperand(0)); 1155 MVT::ValueType SrcVT = N.getValueType(); 1156 MVT::ValueType DestVT = TLI.getValueType(I.getType()); 1157 1158 if (DestVT == MVT::Vector) { 1159 // This is a cast to a vector from something else. This is always a bit 1160 // convert. Get information about the input vector. 
1161 const PackedType *DestTy = cast<PackedType>(I.getType()); 1162 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType()); 1163 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N, 1164 DAG.getConstant(DestTy->getNumElements(),MVT::i32), 1165 DAG.getValueType(EltVT))); 1166 } else if (SrcVT == DestVT) { 1167 setValue(&I, N); // noop cast. 1168 } else if (DestVT == MVT::i1) { 1169 // Cast to bool is a comparison against zero, not truncation to zero. 1170 SDOperand Zero = isInteger(SrcVT) ? DAG.getConstant(0, N.getValueType()) : 1171 DAG.getConstantFP(0.0, N.getValueType()); 1172 setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE)); 1173 } else if (isInteger(SrcVT)) { 1174 if (isInteger(DestVT)) { // Int -> Int cast 1175 if (DestVT < SrcVT) // Truncating cast? 1176 setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N)); 1177 else if (I.getOperand(0)->getType()->isSigned()) 1178 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N)); 1179 else 1180 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N)); 1181 } else if (isFloatingPoint(DestVT)) { // Int -> FP cast 1182 if (I.getOperand(0)->getType()->isSigned()) 1183 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N)); 1184 else 1185 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N)); 1186 } else { 1187 assert(0 && "Unknown cast!"); 1188 } 1189 } else if (isFloatingPoint(SrcVT)) { 1190 if (isFloatingPoint(DestVT)) { // FP -> FP cast 1191 if (DestVT < SrcVT) // Rounding cast? 1192 setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N)); 1193 else 1194 setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N)); 1195 } else if (isInteger(DestVT)) { // FP -> Int cast. 1196 if (I.getType()->isSigned()) 1197 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N)); 1198 else 1199 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N)); 1200 } else { 1201 assert(0 && "Unknown cast!"); 1202 } 1203 } else { 1204 assert(SrcVT == MVT::Vector && "Unknown cast!"); 1205 assert(DestVT != MVT::Vector && "Casts to vector already handled!"); 1206 // This is a cast from a vector to something else. This is always a bit 1207 // convert. Get information about the input vector. 
1208 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N)); 1209 } 1210} 1211 1212void SelectionDAGLowering::visitInsertElement(User &I) { 1213 SDOperand InVec = getValue(I.getOperand(0)); 1214 SDOperand InVal = getValue(I.getOperand(1)); 1215 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), 1216 getValue(I.getOperand(2))); 1217 1218 SDOperand Num = *(InVec.Val->op_end()-2); 1219 SDOperand Typ = *(InVec.Val->op_end()-1); 1220 setValue(&I, DAG.getNode(ISD::VINSERT_VECTOR_ELT, MVT::Vector, 1221 InVec, InVal, InIdx, Num, Typ)); 1222} 1223 1224void SelectionDAGLowering::visitExtractElement(User &I) { 1225 SDOperand InVec = getValue(I.getOperand(0)); 1226 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), 1227 getValue(I.getOperand(1))); 1228 SDOperand Typ = *(InVec.Val->op_end()-1); 1229 setValue(&I, DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, 1230 TLI.getValueType(I.getType()), InVec, InIdx)); 1231} 1232 1233void SelectionDAGLowering::visitShuffleVector(User &I) { 1234 SDOperand V1 = getValue(I.getOperand(0)); 1235 SDOperand V2 = getValue(I.getOperand(1)); 1236 SDOperand Mask = getValue(I.getOperand(2)); 1237 1238 SDOperand Num = *(V1.Val->op_end()-2); 1239 SDOperand Typ = *(V2.Val->op_end()-1); 1240 setValue(&I, DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector, 1241 V1, V2, Mask, Num, Typ)); 1242} 1243 1244 1245void SelectionDAGLowering::visitGetElementPtr(User &I) { 1246 SDOperand N = getValue(I.getOperand(0)); 1247 const Type *Ty = I.getOperand(0)->getType(); 1248 1249 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end(); 1250 OI != E; ++OI) { 1251 Value *Idx = *OI; 1252 if (const StructType *StTy = dyn_cast<StructType>(Ty)) { 1253 unsigned Field = cast<ConstantUInt>(Idx)->getValue(); 1254 if (Field) { 1255 // N = N + Offset 1256 uint64_t Offset = TD->getStructLayout(StTy)->MemberOffsets[Field]; 1257 N = DAG.getNode(ISD::ADD, N.getValueType(), N, 1258 getIntPtrConstant(Offset)); 1259 } 1260 Ty = StTy->getElementType(Field); 1261 } else { 1262 Ty = cast<SequentialType>(Ty)->getElementType(); 1263 1264 // If this is a constant subscript, handle it quickly. 1265 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) { 1266 if (CI->getRawValue() == 0) continue; 1267 1268 uint64_t Offs; 1269 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI)) 1270 Offs = (int64_t)TD->getTypeSize(Ty)*CSI->getValue(); 1271 else 1272 Offs = TD->getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue(); 1273 N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs)); 1274 continue; 1275 } 1276 1277 // N = N + Idx * ElementSize; 1278 uint64_t ElementSize = TD->getTypeSize(Ty); 1279 SDOperand IdxN = getValue(Idx); 1280 1281 // If the index is smaller or larger than intptr_t, truncate or extend 1282 // it. 1283 if (IdxN.getValueType() < N.getValueType()) { 1284 if (Idx->getType()->isSigned()) 1285 IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN); 1286 else 1287 IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN); 1288 } else if (IdxN.getValueType() > N.getValueType()) 1289 IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN); 1290 1291 // If this is a multiply by a power of two, turn it into a shl 1292 // immediately. This is a very common case. 
1293 if (isPowerOf2_64(ElementSize)) { 1294 unsigned Amt = Log2_64(ElementSize); 1295 IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN, 1296 DAG.getConstant(Amt, TLI.getShiftAmountTy())); 1297 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN); 1298 continue; 1299 } 1300 1301 SDOperand Scale = getIntPtrConstant(ElementSize); 1302 IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale); 1303 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN); 1304 } 1305 } 1306 setValue(&I, N); 1307} 1308 1309void SelectionDAGLowering::visitAlloca(AllocaInst &I) { 1310 // If this is a fixed sized alloca in the entry block of the function, 1311 // allocate it statically on the stack. 1312 if (FuncInfo.StaticAllocaMap.count(&I)) 1313 return; // getValue will auto-populate this. 1314 1315 const Type *Ty = I.getAllocatedType(); 1316 uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty); 1317 unsigned Align = std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty), 1318 I.getAlignment()); 1319 1320 SDOperand AllocSize = getValue(I.getArraySize()); 1321 MVT::ValueType IntPtr = TLI.getPointerTy(); 1322 if (IntPtr < AllocSize.getValueType()) 1323 AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize); 1324 else if (IntPtr > AllocSize.getValueType()) 1325 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize); 1326 1327 AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize, 1328 getIntPtrConstant(TySize)); 1329 1330 // Handle alignment. If the requested alignment is less than or equal to the 1331 // stack alignment, ignore it and round the size of the allocation up to the 1332 // stack alignment size. If the size is greater than the stack alignment, we 1333 // note this in the DYNAMIC_STACKALLOC node. 1334 unsigned StackAlign = 1335 TLI.getTargetMachine().getFrameInfo()->getStackAlignment(); 1336 if (Align <= StackAlign) { 1337 Align = 0; 1338 // Add SA-1 to the size. 1339 AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize, 1340 getIntPtrConstant(StackAlign-1)); 1341 // Mask out the low bits for alignment purposes. 1342 AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize, 1343 getIntPtrConstant(~(uint64_t)(StackAlign-1))); 1344 } 1345 1346 SDOperand Ops[] = { getRoot(), AllocSize, getIntPtrConstant(Align) }; 1347 const MVT::ValueType *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(), 1348 MVT::Other); 1349 SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, 2, Ops, 3); 1350 DAG.setRoot(setValue(&I, DSA).getValue(1)); 1351 1352 // Inform the Frame Information that we have just allocated a variable-sized 1353 // object. 1354 CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject(); 1355} 1356 1357void SelectionDAGLowering::visitLoad(LoadInst &I) { 1358 SDOperand Ptr = getValue(I.getOperand(0)); 1359 1360 SDOperand Root; 1361 if (I.isVolatile()) 1362 Root = getRoot(); 1363 else { 1364 // Do not serialize non-volatile loads against each other. 
1365 Root = DAG.getRoot(); 1366 } 1367 1368 setValue(&I, getLoadFrom(I.getType(), Ptr, DAG.getSrcValue(I.getOperand(0)), 1369 Root, I.isVolatile())); 1370} 1371 1372SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr, 1373 SDOperand SrcValue, SDOperand Root, 1374 bool isVolatile) { 1375 SDOperand L; 1376 if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) { 1377 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType()); 1378 L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr, SrcValue); 1379 } else { 1380 L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SrcValue); 1381 } 1382 1383 if (isVolatile) 1384 DAG.setRoot(L.getValue(1)); 1385 else 1386 PendingLoads.push_back(L.getValue(1)); 1387 1388 return L; 1389} 1390 1391 1392void SelectionDAGLowering::visitStore(StoreInst &I) { 1393 Value *SrcV = I.getOperand(0); 1394 SDOperand Src = getValue(SrcV); 1395 SDOperand Ptr = getValue(I.getOperand(1)); 1396 DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr, 1397 DAG.getSrcValue(I.getOperand(1)))); 1398} 1399 1400/// IntrinsicCannotAccessMemory - Return true if the specified intrinsic cannot 1401/// access memory and has no other side effects at all. 1402static bool IntrinsicCannotAccessMemory(unsigned IntrinsicID) { 1403#define GET_NO_MEMORY_INTRINSICS 1404#include "llvm/Intrinsics.gen" 1405#undef GET_NO_MEMORY_INTRINSICS 1406 return false; 1407} 1408 1409// IntrinsicOnlyReadsMemory - Return true if the specified intrinsic doesn't 1410// have any side-effects or if it only reads memory. 1411static bool IntrinsicOnlyReadsMemory(unsigned IntrinsicID) { 1412#define GET_SIDE_EFFECT_INFO 1413#include "llvm/Intrinsics.gen" 1414#undef GET_SIDE_EFFECT_INFO 1415 return false; 1416} 1417 1418/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC 1419/// node. 1420void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I, 1421 unsigned Intrinsic) { 1422 bool HasChain = !IntrinsicCannotAccessMemory(Intrinsic); 1423 bool OnlyLoad = HasChain && IntrinsicOnlyReadsMemory(Intrinsic); 1424 1425 // Build the operand list. 1426 SmallVector<SDOperand, 8> Ops; 1427 if (HasChain) { // If this intrinsic has side-effects, chainify it. 1428 if (OnlyLoad) { 1429 // We don't need to serialize loads against other loads. 1430 Ops.push_back(DAG.getRoot()); 1431 } else { 1432 Ops.push_back(getRoot()); 1433 } 1434 } 1435 1436 // Add the intrinsic ID as an integer operand. 1437 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy())); 1438 1439 // Add all operands of the call to the operand list. 1440 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) { 1441 SDOperand Op = getValue(I.getOperand(i)); 1442 1443 // If this is a vector type, force it to the right packed type. 
1444 if (Op.getValueType() == MVT::Vector) { 1445 const PackedType *OpTy = cast<PackedType>(I.getOperand(i)->getType()); 1446 MVT::ValueType EltVT = TLI.getValueType(OpTy->getElementType()); 1447 1448 MVT::ValueType VVT = MVT::getVectorType(EltVT, OpTy->getNumElements()); 1449 assert(VVT != MVT::Other && "Intrinsic uses a non-legal type?"); 1450 Op = DAG.getNode(ISD::VBIT_CONVERT, VVT, Op); 1451 } 1452 1453 assert(TLI.isTypeLegal(Op.getValueType()) && 1454 "Intrinsic uses a non-legal type?"); 1455 Ops.push_back(Op); 1456 } 1457 1458 std::vector<MVT::ValueType> VTs; 1459 if (I.getType() != Type::VoidTy) { 1460 MVT::ValueType VT = TLI.getValueType(I.getType()); 1461 if (VT == MVT::Vector) { 1462 const PackedType *DestTy = cast<PackedType>(I.getType()); 1463 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType()); 1464 1465 VT = MVT::getVectorType(EltVT, DestTy->getNumElements()); 1466 assert(VT != MVT::Other && "Intrinsic uses a non-legal type?"); 1467 } 1468 1469 assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?"); 1470 VTs.push_back(VT); 1471 } 1472 if (HasChain) 1473 VTs.push_back(MVT::Other); 1474 1475 const MVT::ValueType *VTList = DAG.getNodeValueTypes(VTs); 1476 1477 // Create the node. 1478 SDOperand Result; 1479 if (!HasChain) 1480 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTList, VTs.size(), 1481 &Ops[0], Ops.size()); 1482 else if (I.getType() != Type::VoidTy) 1483 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTList, VTs.size(), 1484 &Ops[0], Ops.size()); 1485 else 1486 Result = DAG.getNode(ISD::INTRINSIC_VOID, VTList, VTs.size(), 1487 &Ops[0], Ops.size()); 1488 1489 if (HasChain) { 1490 SDOperand Chain = Result.getValue(Result.Val->getNumValues()-1); 1491 if (OnlyLoad) 1492 PendingLoads.push_back(Chain); 1493 else 1494 DAG.setRoot(Chain); 1495 } 1496 if (I.getType() != Type::VoidTy) { 1497 if (const PackedType *PTy = dyn_cast<PackedType>(I.getType())) { 1498 MVT::ValueType EVT = TLI.getValueType(PTy->getElementType()); 1499 Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result, 1500 DAG.getConstant(PTy->getNumElements(), MVT::i32), 1501 DAG.getValueType(EVT)); 1502 } 1503 setValue(&I, Result); 1504 } 1505} 1506 1507/// visitIntrinsicCall - Lower the call to the specified intrinsic function. If 1508/// we want to emit this as a call to a named external function, return the name 1509/// otherwise lower it and return null. 1510const char * 1511SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { 1512 switch (Intrinsic) { 1513 default: 1514 // By default, turn this into a target intrinsic node. 
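// Unrecognized intrinsics are forwarded to the target as INTRINSIC_W_CHAIN,
// INTRINSIC_WO_CHAIN or INTRINSIC_VOID nodes (built in visitTargetIntrinsic
// above), which the target's instruction selector is expected to match.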
1515 visitTargetIntrinsic(I, Intrinsic); 1516 return 0; 1517 case Intrinsic::vastart: visitVAStart(I); return 0; 1518 case Intrinsic::vaend: visitVAEnd(I); return 0; 1519 case Intrinsic::vacopy: visitVACopy(I); return 0; 1520 case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0; 1521 case Intrinsic::frameaddress: visitFrameReturnAddress(I, true); return 0; 1522 case Intrinsic::setjmp: 1523 return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp(); 1524 break; 1525 case Intrinsic::longjmp: 1526 return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp(); 1527 break; 1528 case Intrinsic::memcpy_i32: 1529 case Intrinsic::memcpy_i64: 1530 visitMemIntrinsic(I, ISD::MEMCPY); 1531 return 0; 1532 case Intrinsic::memset_i32: 1533 case Intrinsic::memset_i64: 1534 visitMemIntrinsic(I, ISD::MEMSET); 1535 return 0; 1536 case Intrinsic::memmove_i32: 1537 case Intrinsic::memmove_i64: 1538 visitMemIntrinsic(I, ISD::MEMMOVE); 1539 return 0; 1540 1541 case Intrinsic::dbg_stoppoint: { 1542 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1543 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I); 1544 if (DebugInfo && SPI.getContext() && DebugInfo->Verify(SPI.getContext())) { 1545 SDOperand Ops[5]; 1546 1547 Ops[0] = getRoot(); 1548 Ops[1] = getValue(SPI.getLineValue()); 1549 Ops[2] = getValue(SPI.getColumnValue()); 1550 1551 DebugInfoDesc *DD = DebugInfo->getDescFor(SPI.getContext()); 1552 assert(DD && "Not a debug information descriptor"); 1553 CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD); 1554 1555 Ops[3] = DAG.getString(CompileUnit->getFileName()); 1556 Ops[4] = DAG.getString(CompileUnit->getDirectory()); 1557 1558 DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops, 5)); 1559 } 1560 1561 return 0; 1562 } 1563 case Intrinsic::dbg_region_start: { 1564 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1565 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I); 1566 if (DebugInfo && RSI.getContext() && DebugInfo->Verify(RSI.getContext())) { 1567 unsigned LabelID = DebugInfo->RecordRegionStart(RSI.getContext()); 1568 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, getRoot(), 1569 DAG.getConstant(LabelID, MVT::i32))); 1570 } 1571 1572 return 0; 1573 } 1574 case Intrinsic::dbg_region_end: { 1575 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1576 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I); 1577 if (DebugInfo && REI.getContext() && DebugInfo->Verify(REI.getContext())) { 1578 unsigned LabelID = DebugInfo->RecordRegionEnd(REI.getContext()); 1579 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, 1580 getRoot(), DAG.getConstant(LabelID, MVT::i32))); 1581 } 1582 1583 return 0; 1584 } 1585 case Intrinsic::dbg_func_start: { 1586 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1587 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I); 1588 if (DebugInfo && FSI.getSubprogram() && 1589 DebugInfo->Verify(FSI.getSubprogram())) { 1590 unsigned LabelID = DebugInfo->RecordRegionStart(FSI.getSubprogram()); 1591 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, 1592 getRoot(), DAG.getConstant(LabelID, MVT::i32))); 1593 } 1594 1595 return 0; 1596 } 1597 case Intrinsic::dbg_declare: { 1598 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1599 DbgDeclareInst &DI = cast<DbgDeclareInst>(I); 1600 if (DebugInfo && DI.getVariable() && DebugInfo->Verify(DI.getVariable())) { 1601 SDOperand AddressOp = getValue(DI.getAddress()); 1602 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp)) 1603 DebugInfo->RecordVariable(DI.getVariable(), 
FI->getIndex()); 1604 } 1605 1606 return 0; 1607 } 1608 1609 case Intrinsic::isunordered_f32: 1610 case Intrinsic::isunordered_f64: 1611 setValue(&I, DAG.getSetCC(MVT::i1,getValue(I.getOperand(1)), 1612 getValue(I.getOperand(2)), ISD::SETUO)); 1613 return 0; 1614 1615 case Intrinsic::sqrt_f32: 1616 case Intrinsic::sqrt_f64: 1617 setValue(&I, DAG.getNode(ISD::FSQRT, 1618 getValue(I.getOperand(1)).getValueType(), 1619 getValue(I.getOperand(1)))); 1620 return 0; 1621 case Intrinsic::powi_f32: 1622 case Intrinsic::powi_f64: 1623 setValue(&I, DAG.getNode(ISD::FPOWI, 1624 getValue(I.getOperand(1)).getValueType(), 1625 getValue(I.getOperand(1)), 1626 getValue(I.getOperand(2)))); 1627 return 0; 1628 case Intrinsic::pcmarker: { 1629 SDOperand Tmp = getValue(I.getOperand(1)); 1630 DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp)); 1631 return 0; 1632 } 1633 case Intrinsic::readcyclecounter: { 1634 SDOperand Op = getRoot(); 1635 SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER, 1636 DAG.getNodeValueTypes(MVT::i64, MVT::Other), 2, 1637 &Op, 1); 1638 setValue(&I, Tmp); 1639 DAG.setRoot(Tmp.getValue(1)); 1640 return 0; 1641 } 1642 case Intrinsic::bswap_i16: 1643 case Intrinsic::bswap_i32: 1644 case Intrinsic::bswap_i64: 1645 setValue(&I, DAG.getNode(ISD::BSWAP, 1646 getValue(I.getOperand(1)).getValueType(), 1647 getValue(I.getOperand(1)))); 1648 return 0; 1649 case Intrinsic::cttz_i8: 1650 case Intrinsic::cttz_i16: 1651 case Intrinsic::cttz_i32: 1652 case Intrinsic::cttz_i64: 1653 setValue(&I, DAG.getNode(ISD::CTTZ, 1654 getValue(I.getOperand(1)).getValueType(), 1655 getValue(I.getOperand(1)))); 1656 return 0; 1657 case Intrinsic::ctlz_i8: 1658 case Intrinsic::ctlz_i16: 1659 case Intrinsic::ctlz_i32: 1660 case Intrinsic::ctlz_i64: 1661 setValue(&I, DAG.getNode(ISD::CTLZ, 1662 getValue(I.getOperand(1)).getValueType(), 1663 getValue(I.getOperand(1)))); 1664 return 0; 1665 case Intrinsic::ctpop_i8: 1666 case Intrinsic::ctpop_i16: 1667 case Intrinsic::ctpop_i32: 1668 case Intrinsic::ctpop_i64: 1669 setValue(&I, DAG.getNode(ISD::CTPOP, 1670 getValue(I.getOperand(1)).getValueType(), 1671 getValue(I.getOperand(1)))); 1672 return 0; 1673 case Intrinsic::stacksave: { 1674 SDOperand Op = getRoot(); 1675 SDOperand Tmp = DAG.getNode(ISD::STACKSAVE, 1676 DAG.getNodeValueTypes(TLI.getPointerTy(), MVT::Other), 2, &Op, 1); 1677 setValue(&I, Tmp); 1678 DAG.setRoot(Tmp.getValue(1)); 1679 return 0; 1680 } 1681 case Intrinsic::stackrestore: { 1682 SDOperand Tmp = getValue(I.getOperand(1)); 1683 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp)); 1684 return 0; 1685 } 1686 case Intrinsic::prefetch: 1687 // FIXME: Currently discarding prefetches. 1688 return 0; 1689 } 1690} 1691 1692 1693void SelectionDAGLowering::visitCall(CallInst &I) { 1694 const char *RenameFn = 0; 1695 if (Function *F = I.getCalledFunction()) { 1696 if (F->isExternal()) 1697 if (unsigned IID = F->getIntrinsicID()) { 1698 RenameFn = visitIntrinsicCall(I, IID); 1699 if (!RenameFn) 1700 return; 1701 } else { // Not an LLVM intrinsic. 1702 const std::string &Name = F->getName(); 1703 if (Name[0] == 'c' && (Name == "copysign" || Name == "copysignf")) { 1704 if (I.getNumOperands() == 3 && // Basic sanity checks. 
1705 I.getOperand(1)->getType()->isFloatingPoint() && 1706 I.getType() == I.getOperand(1)->getType() && 1707 I.getType() == I.getOperand(2)->getType()) { 1708 SDOperand LHS = getValue(I.getOperand(1)); 1709 SDOperand RHS = getValue(I.getOperand(2)); 1710 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(), 1711 LHS, RHS)); 1712 return; 1713 } 1714 } else if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) { 1715 if (I.getNumOperands() == 2 && // Basic sanity checks. 1716 I.getOperand(1)->getType()->isFloatingPoint() && 1717 I.getType() == I.getOperand(1)->getType()) { 1718 SDOperand Tmp = getValue(I.getOperand(1)); 1719 setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp)); 1720 return; 1721 } 1722 } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) { 1723 if (I.getNumOperands() == 2 && // Basic sanity checks. 1724 I.getOperand(1)->getType()->isFloatingPoint() && 1725 I.getType() == I.getOperand(1)->getType()) { 1726 SDOperand Tmp = getValue(I.getOperand(1)); 1727 setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp)); 1728 return; 1729 } 1730 } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) { 1731 if (I.getNumOperands() == 2 && // Basic sanity checks. 1732 I.getOperand(1)->getType()->isFloatingPoint() && 1733 I.getType() == I.getOperand(1)->getType()) { 1734 SDOperand Tmp = getValue(I.getOperand(1)); 1735 setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp)); 1736 return; 1737 } 1738 } 1739 } 1740 } else if (isa<InlineAsm>(I.getOperand(0))) { 1741 visitInlineAsm(I); 1742 return; 1743 } 1744 1745 SDOperand Callee; 1746 if (!RenameFn) 1747 Callee = getValue(I.getOperand(0)); 1748 else 1749 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy()); 1750 std::vector<std::pair<SDOperand, const Type*> > Args; 1751 Args.reserve(I.getNumOperands()); 1752 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) { 1753 Value *Arg = I.getOperand(i); 1754 SDOperand ArgNode = getValue(Arg); 1755 Args.push_back(std::make_pair(ArgNode, Arg->getType())); 1756 } 1757 1758 const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType()); 1759 const FunctionType *FTy = cast<FunctionType>(PT->getElementType()); 1760 1761 std::pair<SDOperand,SDOperand> Result = 1762 TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), I.getCallingConv(), 1763 I.isTailCall(), Callee, Args, DAG); 1764 if (I.getType() != Type::VoidTy) 1765 setValue(&I, Result.first); 1766 DAG.setRoot(Result.second); 1767} 1768 1769SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG, 1770 SDOperand &Chain, SDOperand &Flag)const{ 1771 SDOperand Val = DAG.getCopyFromReg(Chain, Regs[0], RegVT, Flag); 1772 Chain = Val.getValue(1); 1773 Flag = Val.getValue(2); 1774 1775 // If the result was expanded, copy from the top part. 1776 if (Regs.size() > 1) { 1777 assert(Regs.size() == 2 && 1778 "Cannot expand to more than 2 elts yet!"); 1779 SDOperand Hi = DAG.getCopyFromReg(Chain, Regs[1], RegVT, Flag); 1780 Chain = Hi.getValue(1); 1781 Flag = Hi.getValue(2); 1782 if (DAG.getTargetLoweringInfo().isLittleEndian()) 1783 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Val, Hi); 1784 else 1785 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Hi, Val); 1786 } 1787 1788 // Otherwise, if the return value was promoted or extended, truncate it to the 1789 // appropriate type. 
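// For example, an i8 value that was promoted into an i32 register is truncated
// back to i8 here, and an f32 value carried in an f64 register is narrowed
// again with FP_ROUND.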
1790 if (RegVT == ValueVT) 1791 return Val; 1792 1793 if (MVT::isInteger(RegVT)) { 1794 if (ValueVT < RegVT) 1795 return DAG.getNode(ISD::TRUNCATE, ValueVT, Val); 1796 else 1797 return DAG.getNode(ISD::ANY_EXTEND, ValueVT, Val); 1798 } else { 1799 return DAG.getNode(ISD::FP_ROUND, ValueVT, Val); 1800 } 1801} 1802 1803/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the 1804/// specified value into the registers specified by this object. This uses 1805/// Chain/Flag as the input and updates them for the output Chain/Flag. 1806void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG, 1807 SDOperand &Chain, SDOperand &Flag, 1808 MVT::ValueType PtrVT) const { 1809 if (Regs.size() == 1) { 1810 // If there is a single register and the types differ, this must be 1811 // a promotion. 1812 if (RegVT != ValueVT) { 1813 if (MVT::isInteger(RegVT)) { 1814 if (RegVT < ValueVT) 1815 Val = DAG.getNode(ISD::TRUNCATE, RegVT, Val); 1816 else 1817 Val = DAG.getNode(ISD::ANY_EXTEND, RegVT, Val); 1818 } else 1819 Val = DAG.getNode(ISD::FP_EXTEND, RegVT, Val); 1820 } 1821 Chain = DAG.getCopyToReg(Chain, Regs[0], Val, Flag); 1822 Flag = Chain.getValue(1); 1823 } else { 1824 std::vector<unsigned> R(Regs); 1825 if (!DAG.getTargetLoweringInfo().isLittleEndian()) 1826 std::reverse(R.begin(), R.end()); 1827 1828 for (unsigned i = 0, e = R.size(); i != e; ++i) { 1829 SDOperand Part = DAG.getNode(ISD::EXTRACT_ELEMENT, RegVT, Val, 1830 DAG.getConstant(i, PtrVT)); 1831 Chain = DAG.getCopyToReg(Chain, R[i], Part, Flag); 1832 Flag = Chain.getValue(1); 1833 } 1834 } 1835} 1836 1837/// AddInlineAsmOperands - Add this value to the specified inlineasm node 1838/// operand list. This adds the code marker and includes the number of 1839/// values added into it. 1840void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG, 1841 std::vector<SDOperand> &Ops) const { 1842 Ops.push_back(DAG.getConstant(Code | (Regs.size() << 3), MVT::i32)); 1843 for (unsigned i = 0, e = Regs.size(); i != e; ++i) 1844 Ops.push_back(DAG.getRegister(Regs[i], RegVT)); 1845} 1846 1847/// isAllocatableRegister - If the specified register is safe to allocate, 1848/// i.e. it isn't a stack pointer or some other special register, return the 1849/// register class for the register. Otherwise, return null. 1850static const TargetRegisterClass * 1851isAllocatableRegister(unsigned Reg, MachineFunction &MF, 1852 const TargetLowering &TLI, const MRegisterInfo *MRI) { 1853 MVT::ValueType FoundVT = MVT::Other; 1854 const TargetRegisterClass *FoundRC = 0; 1855 for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(), 1856 E = MRI->regclass_end(); RCI != E; ++RCI) { 1857 MVT::ValueType ThisVT = MVT::Other; 1858 1859 const TargetRegisterClass *RC = *RCI; 1860 // If none of the the value types for this register class are valid, we 1861 // can't use it. For example, 64-bit reg classes on 32-bit targets. 1862 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end(); 1863 I != E; ++I) { 1864 if (TLI.isTypeLegal(*I)) { 1865 // If we have already found this register in a different register class, 1866 // choose the one with the largest VT specified. For example, on 1867 // PowerPC, we favor f64 register classes over f32. 1868 if (FoundVT == MVT::Other || 1869 MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) { 1870 ThisVT = *I; 1871 break; 1872 } 1873 } 1874 } 1875 1876 if (ThisVT == MVT::Other) continue; 1877 1878 // NOTE: This isn't ideal. 
In particular, this might allocate the
1879 // frame pointer in functions that need it (because it has not yet been
1880 // taken out of the allocation order, since no variable sized allocation has
1881 // been seen yet). This is a slight code pessimization, but should still work.
1882 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
1883 E = RC->allocation_order_end(MF); I != E; ++I)
1884 if (*I == Reg) {
1885 // We found a matching register class. Keep looking at others in case
1886 // we find one with larger registers that this physreg is also in.
1887 FoundRC = RC;
1888 FoundVT = ThisVT;
1889 break;
1890 }
1891 }
1892 return FoundRC;
1893 }
1894
1895 RegsForValue SelectionDAGLowering::
1896 GetRegistersForValue(const std::string &ConstrCode,
1897 MVT::ValueType VT, bool isOutReg, bool isInReg,
1898 std::set<unsigned> &OutputRegs,
1899 std::set<unsigned> &InputRegs) {
1900 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
1901 TLI.getRegForInlineAsmConstraint(ConstrCode, VT);
1902 std::vector<unsigned> Regs;
1903
1904 unsigned NumRegs = VT != MVT::Other ? TLI.getNumElements(VT) : 1;
1905 MVT::ValueType RegVT;
1906 MVT::ValueType ValueVT = VT;
1907
1908 if (PhysReg.first) {
1909 if (VT == MVT::Other)
1910 ValueVT = *PhysReg.second->vt_begin();
1911
1912 // Get the actual register value type. This is important, because the user
1913 // may have asked for (e.g.) the AX register in i32 type. We need to
1914 // remember that AX is actually i16 to get the right extension.
1915 RegVT = *PhysReg.second->vt_begin();
1916
1917 // This is an explicit reference to a physical register.
1918 Regs.push_back(PhysReg.first);
1919
1920 // If this is an expanded reference, add the rest of the regs to Regs.
1921 if (NumRegs != 1) {
1922 TargetRegisterClass::iterator I = PhysReg.second->begin();
1923 TargetRegisterClass::iterator E = PhysReg.second->end();
1924 for (; *I != PhysReg.first; ++I)
1925 assert(I != E && "Didn't find reg!");
1926
1927 // Already added the first reg.
1928 --NumRegs; ++I;
1929 for (; NumRegs; --NumRegs, ++I) {
1930 assert(I != E && "Ran out of registers to allocate!");
1931 Regs.push_back(*I);
1932 }
1933 }
1934 return RegsForValue(Regs, RegVT, ValueVT);
1935 }
1936
1937 // This is a reference to a register class. Allocate NumRegs consecutive,
1938 // available, registers from the class.
1939 std::vector<unsigned> RegClassRegs =
1940 TLI.getRegClassForInlineAsmConstraint(ConstrCode, VT);
1941
1942 const MRegisterInfo *MRI = DAG.getTarget().getRegisterInfo();
1943 MachineFunction &MF = *CurMBB->getParent();
1944 unsigned NumAllocated = 0;
1945 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
1946 unsigned Reg = RegClassRegs[i];
1947 // See if this register is available.
1948 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
1949 (isInReg && InputRegs.count(Reg))) { // Already used.
1950 // Make sure we find consecutive registers.
1951 NumAllocated = 0;
1952 continue;
1953 }
1954
1955 // Check to see if this register is allocatable (i.e. don't give out the
1956 // stack pointer).
1957 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, MRI);
1958 if (!RC) {
1959 // Make sure we find consecutive registers.
1960 NumAllocated = 0;
1961 continue;
1962 }
1963
1964 // Okay, this register is good, we can use it.
1965 ++NumAllocated;
1966
1967 // If we have allocated enough consecutive registers for the entire value,
1968 if (NumAllocated == NumRegs) {
1969 unsigned RegStart = (i-NumAllocated)+1;
1970 unsigned RegEnd = i+1;
1971 // Mark all of the allocated registers used.
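// Illustrative example (hypothetical register names): if RegClassRegs is
// {R0, R1, R2, R3}, NumRegs is 2 and the run of free registers completes at
// i == 2, then RegStart == 1 and RegEnd == 3, so R1 and R2 are returned.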
1972 for (unsigned i = RegStart; i != RegEnd; ++i) {
1973 unsigned Reg = RegClassRegs[i];
1974 Regs.push_back(Reg);
1975 if (isOutReg) OutputRegs.insert(Reg); // Mark reg used.
1976 if (isInReg) InputRegs.insert(Reg); // Mark reg used.
1977 }
1978
1979 return RegsForValue(Regs, *RC->vt_begin(), VT);
1980 }
1981 }
1982
1983 // Otherwise, we couldn't allocate enough registers for this.
1984 return RegsForValue();
1985 }
1986
1987
1988 /// visitInlineAsm - Handle a call to an InlineAsm object.
1989 ///
1990 void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
1991 InlineAsm *IA = cast<InlineAsm>(I.getOperand(0));
1992
1993 SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
1994 MVT::Other);
1995
1996 // Note, we treat inline asms both with and without side-effects as the same.
1997 // If an inline asm doesn't have side effects and doesn't access memory, we
1998 // could choose to not chain it; currently we always chain it.
1999 bool hasSideEffects = IA->hasSideEffects();
2000
2001 std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
2002 std::vector<MVT::ValueType> ConstraintVTs;
2003
2004 /// AsmNodeOperands - A list of pairs. The first element is a register, the
2005 /// second is a bitfield where bit #0 is set if it is a use and bit #1 is set
2006 /// if it is a def of that register.
2007 std::vector<SDOperand> AsmNodeOperands;
2008 AsmNodeOperands.push_back(SDOperand()); // reserve space for input chain
2009 AsmNodeOperands.push_back(AsmStr);
2010
2011 SDOperand Chain = getRoot();
2012 SDOperand Flag;
2013
2014 // We fully assign registers here at isel time. This is not optimal, but
2015 // should work. For register classes that correspond to LLVM classes, we
2016 // could let the LLVM RA do its thing, but we currently don't. Do a prepass
2017 // over the constraints, collecting fixed registers that we know we can't use.
2018 std::set<unsigned> OutputRegs, InputRegs;
2019 unsigned OpNum = 1;
2020 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
2021 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
2022 std::string &ConstraintCode = Constraints[i].Codes[0];
2023
2024 MVT::ValueType OpVT;
2025
2026 // Compute the value type for each operand and add it to ConstraintVTs.
2027 switch (Constraints[i].Type) {
2028 case InlineAsm::isOutput:
2029 if (!Constraints[i].isIndirectOutput) {
2030 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2031 OpVT = TLI.getValueType(I.getType());
2032 } else {
2033 const Type *OpTy = I.getOperand(OpNum)->getType();
2034 OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType());
2035 OpNum++; // Consumes a call operand.
2036 }
2037 break;
2038 case InlineAsm::isInput:
2039 OpVT = TLI.getValueType(I.getOperand(OpNum)->getType());
2040 OpNum++; // Consumes a call operand.
2041 break;
2042 case InlineAsm::isClobber:
2043 OpVT = MVT::Other;
2044 break;
2045 }
2046
2047 ConstraintVTs.push_back(OpVT);
2048
2049 if (TLI.getRegForInlineAsmConstraint(ConstraintCode, OpVT).first == 0)
2050 continue; // Not assigned a fixed reg.
2051
2052 // Build a list of regs that this operand uses. This always has a single
2053 // element for promoted/expanded operands.
2054 RegsForValue Regs = GetRegistersForValue(ConstraintCode, OpVT,
2055 false, false,
2056 OutputRegs, InputRegs);
2057
2058 switch (Constraints[i].Type) {
2059 case InlineAsm::isOutput:
2060 // We can't assign any other output to this register.
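// (Only constraints that name a specific physical register get this far in the
// prepass; register-class constraints were skipped above and are not assigned
// until the second pass over the constraints below.)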
2061 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2062 // If this is an early-clobber output, it cannot be assigned to the same 2063 // value as the input reg. 2064 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput) 2065 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2066 break; 2067 case InlineAsm::isInput: 2068 // We can't assign any other input to this register. 2069 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2070 break; 2071 case InlineAsm::isClobber: 2072 // Clobbered regs cannot be used as inputs or outputs. 2073 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2074 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2075 break; 2076 } 2077 } 2078 2079 // Loop over all of the inputs, copying the operand values into the 2080 // appropriate registers and processing the output regs. 2081 RegsForValue RetValRegs; 2082 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit; 2083 OpNum = 1; 2084 2085 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) { 2086 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!"); 2087 std::string &ConstraintCode = Constraints[i].Codes[0]; 2088 2089 switch (Constraints[i].Type) { 2090 case InlineAsm::isOutput: { 2091 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass; 2092 if (ConstraintCode.size() == 1) // not a physreg name. 2093 CTy = TLI.getConstraintType(ConstraintCode[0]); 2094 2095 if (CTy == TargetLowering::C_Memory) { 2096 // Memory output. 2097 SDOperand InOperandVal = getValue(I.getOperand(OpNum)); 2098 2099 // Check that the operand (the address to store to) isn't a float. 2100 if (!MVT::isInteger(InOperandVal.getValueType())) 2101 assert(0 && "MATCH FAIL!"); 2102 2103 if (!Constraints[i].isIndirectOutput) 2104 assert(0 && "MATCH FAIL!"); 2105 2106 OpNum++; // Consumes a call operand. 2107 2108 // Extend/truncate to the right pointer type if needed. 2109 MVT::ValueType PtrType = TLI.getPointerTy(); 2110 if (InOperandVal.getValueType() < PtrType) 2111 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal); 2112 else if (InOperandVal.getValueType() > PtrType) 2113 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal); 2114 2115 // Add information to the INLINEASM node to know about this output. 2116 unsigned ResOpType = 4/*MEM*/ | (1 << 3); 2117 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32)); 2118 AsmNodeOperands.push_back(InOperandVal); 2119 break; 2120 } 2121 2122 // Otherwise, this is a register output. 2123 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!"); 2124 2125 // If this is an early-clobber output, or if there is an input 2126 // constraint that matches this, we need to reserve the input register 2127 // so no other inputs allocate to it. 2128 bool UsesInputRegister = false; 2129 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput) 2130 UsesInputRegister = true; 2131 2132 // Copy the output from the appropriate register. Find a register that 2133 // we can use. 
2134 RegsForValue Regs =
2135 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
2136 true, UsesInputRegister,
2137 OutputRegs, InputRegs);
2138 assert(!Regs.Regs.empty() && "Couldn't allocate output reg!");
2139
2140 if (!Constraints[i].isIndirectOutput) {
2141 assert(RetValRegs.Regs.empty() &&
2142 "Cannot have multiple output constraints yet!");
2143 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2144 RetValRegs = Regs;
2145 } else {
2146 IndirectStoresToEmit.push_back(std::make_pair(Regs,
2147 I.getOperand(OpNum)));
2148 OpNum++; // Consumes a call operand.
2149 }
2150
2151 // Add information to the INLINEASM node to know that this register is
2152 // set.
2153 Regs.AddInlineAsmOperands(2 /*REGDEF*/, DAG, AsmNodeOperands);
2154 break;
2155 }
2156 case InlineAsm::isInput: {
2157 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
2158 OpNum++; // Consumes a call operand.
2159
2160 if (isdigit(ConstraintCode[0])) { // Matching constraint?
2161 // If this is required to match an output register we have already set,
2162 // just use its register.
2163 unsigned OperandNo = atoi(ConstraintCode.c_str());
2164
2165 // Scan until we find the definition of this operand that we already
2166 // emitted. When we find it, create a RegsForValue operand.
2167 unsigned CurOp = 2; // The first operand.
2168 for (; OperandNo; --OperandNo) {
2169 // Advance to the next operand.
2170 unsigned NumOps =
2171 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2172 assert(((NumOps & 7) == 2 /*REGDEF*/ ||
2173 (NumOps & 7) == 4 /*MEM*/) &&
2174 "Skipped past definitions?");
2175 CurOp += (NumOps>>3)+1;
2176 }
2177
2178 unsigned NumOps =
2179 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2180 assert((NumOps & 7) == 2 /*REGDEF*/ &&
2181 "Skipped past definitions?");
2182
2183 // Add NumOps>>3 registers to MatchedRegs.
2184 RegsForValue MatchedRegs;
2185 MatchedRegs.ValueVT = InOperandVal.getValueType();
2186 MatchedRegs.RegVT = AsmNodeOperands[CurOp+1].getValueType();
2187 for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
2188 unsigned Reg=cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
2189 MatchedRegs.Regs.push_back(Reg);
2190 }
2191
2192 // Use the produced MatchedRegs object to copy the input value into the
2193 // matched registers.
2194 MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag,
2195 TLI.getPointerTy());
2196 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
2197 break;
2198 }
2199
2200 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
2201 if (ConstraintCode.size() == 1) // not a physreg name.
2202 CTy = TLI.getConstraintType(ConstraintCode[0]);
2203
2204 if (CTy == TargetLowering::C_Other) {
2205 if (!TLI.isOperandValidForConstraint(InOperandVal, ConstraintCode[0]))
2206 assert(0 && "MATCH FAIL!");
2207
2208 // Add information to the INLINEASM node to know about this input.
2209 unsigned ResOpType = 3 /*IMM*/ | (1 << 3);
2210 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2211 AsmNodeOperands.push_back(InOperandVal);
2212 break;
2213 } else if (CTy == TargetLowering::C_Memory) {
2214 // Memory input.
2215
2216 // Check that the operand isn't a float.
2217 if (!MVT::isInteger(InOperandVal.getValueType()))
2218 assert(0 && "MATCH FAIL!");
2219
2220 // Extend/truncate to the right pointer type if needed.
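// Memory-constrained operands are recorded on the INLINEASM node as a single
// pointer-sized address value, so a narrower integer is zero-extended and a
// wider one truncated to the target's pointer width first.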
2220 MVT::ValueType PtrType = TLI.getPointerTy(); 2221 if (InOperandVal.getValueType() < PtrType) 2222 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal); 2223 else if (InOperandVal.getValueType() > PtrType) 2224 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal); 2225 2226 // Add information to the INLINEASM node to know about this input. 2227 unsigned ResOpType = 4/*MEM*/ | (1 << 3); 2228 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32)); 2229 AsmNodeOperands.push_back(InOperandVal); 2230 break; 2231 } 2232 2233 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!"); 2234 2235 // Copy the input into the appropriate registers. 2236 RegsForValue InRegs = 2237 GetRegistersForValue(ConstraintCode, ConstraintVTs[i], 2238 false, true, OutputRegs, InputRegs); 2239 // FIXME: should be match fail. 2240 assert(!InRegs.Regs.empty() && "Couldn't allocate input reg!"); 2241 2242 InRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag, TLI.getPointerTy()); 2243 2244 InRegs.AddInlineAsmOperands(1/*REGUSE*/, DAG, AsmNodeOperands); 2245 break; 2246 } 2247 case InlineAsm::isClobber: { 2248 RegsForValue ClobberedRegs = 2249 GetRegistersForValue(ConstraintCode, MVT::Other, false, false, 2250 OutputRegs, InputRegs); 2251 // Add the clobbered value to the operand list, so that the register 2252 // allocator is aware that the physreg got clobbered. 2253 if (!ClobberedRegs.Regs.empty()) 2254 ClobberedRegs.AddInlineAsmOperands(2/*REGDEF*/, DAG, AsmNodeOperands); 2255 break; 2256 } 2257 } 2258 } 2259 2260 // Finish up input operands. 2261 AsmNodeOperands[0] = Chain; 2262 if (Flag.Val) AsmNodeOperands.push_back(Flag); 2263 2264 Chain = DAG.getNode(ISD::INLINEASM, 2265 DAG.getNodeValueTypes(MVT::Other, MVT::Flag), 2, 2266 &AsmNodeOperands[0], AsmNodeOperands.size()); 2267 Flag = Chain.getValue(1); 2268 2269 // If this asm returns a register value, copy the result from that register 2270 // and set it as the value of the call. 2271 if (!RetValRegs.Regs.empty()) 2272 setValue(&I, RetValRegs.getCopyFromRegs(DAG, Chain, Flag)); 2273 2274 std::vector<std::pair<SDOperand, Value*> > StoresToEmit; 2275 2276 // Process indirect outputs, first output all of the flagged copies out of 2277 // physregs. 2278 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) { 2279 RegsForValue &OutRegs = IndirectStoresToEmit[i].first; 2280 Value *Ptr = IndirectStoresToEmit[i].second; 2281 SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, Flag); 2282 StoresToEmit.push_back(std::make_pair(OutVal, Ptr)); 2283 } 2284 2285 // Emit the non-flagged stores from the physregs. 2286 SmallVector<SDOperand, 8> OutChains; 2287 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) 2288 OutChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain, 2289 StoresToEmit[i].first, 2290 getValue(StoresToEmit[i].second), 2291 DAG.getSrcValue(StoresToEmit[i].second))); 2292 if (!OutChains.empty()) 2293 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 2294 &OutChains[0], OutChains.size()); 2295 DAG.setRoot(Chain); 2296} 2297 2298 2299void SelectionDAGLowering::visitMalloc(MallocInst &I) { 2300 SDOperand Src = getValue(I.getOperand(0)); 2301 2302 MVT::ValueType IntPtr = TLI.getPointerTy(); 2303 2304 if (IntPtr < Src.getValueType()) 2305 Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src); 2306 else if (IntPtr > Src.getValueType()) 2307 Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src); 2308 2309 // Scale the source by the type size. 
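// For example, a malloc of %n doubles becomes a call to malloc(%n * 8) on a
// target where a double occupies eight bytes.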
2310 uint64_t ElementSize = TD->getTypeSize(I.getType()->getElementType()); 2311 Src = DAG.getNode(ISD::MUL, Src.getValueType(), 2312 Src, getIntPtrConstant(ElementSize)); 2313 2314 std::vector<std::pair<SDOperand, const Type*> > Args; 2315 Args.push_back(std::make_pair(Src, TLI.getTargetData()->getIntPtrType())); 2316 2317 std::pair<SDOperand,SDOperand> Result = 2318 TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true, 2319 DAG.getExternalSymbol("malloc", IntPtr), 2320 Args, DAG); 2321 setValue(&I, Result.first); // Pointers always fit in registers 2322 DAG.setRoot(Result.second); 2323} 2324 2325void SelectionDAGLowering::visitFree(FreeInst &I) { 2326 std::vector<std::pair<SDOperand, const Type*> > Args; 2327 Args.push_back(std::make_pair(getValue(I.getOperand(0)), 2328 TLI.getTargetData()->getIntPtrType())); 2329 MVT::ValueType IntPtr = TLI.getPointerTy(); 2330 std::pair<SDOperand,SDOperand> Result = 2331 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true, 2332 DAG.getExternalSymbol("free", IntPtr), Args, DAG); 2333 DAG.setRoot(Result.second); 2334} 2335 2336// InsertAtEndOfBasicBlock - This method should be implemented by targets that 2337// mark instructions with the 'usesCustomDAGSchedInserter' flag. These 2338// instructions are special in various ways, which require special support to 2339// insert. The specified MachineInstr is created but not inserted into any 2340// basic blocks, and the scheduler passes ownership of it to this method. 2341MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, 2342 MachineBasicBlock *MBB) { 2343 std::cerr << "If a target marks an instruction with " 2344 "'usesCustomDAGSchedInserter', it must implement " 2345 "TargetLowering::InsertAtEndOfBasicBlock!\n"; 2346 abort(); 2347 return 0; 2348} 2349 2350void SelectionDAGLowering::visitVAStart(CallInst &I) { 2351 DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(), 2352 getValue(I.getOperand(1)), 2353 DAG.getSrcValue(I.getOperand(1)))); 2354} 2355 2356void SelectionDAGLowering::visitVAArg(VAArgInst &I) { 2357 SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(), 2358 getValue(I.getOperand(0)), 2359 DAG.getSrcValue(I.getOperand(0))); 2360 setValue(&I, V); 2361 DAG.setRoot(V.getValue(1)); 2362} 2363 2364void SelectionDAGLowering::visitVAEnd(CallInst &I) { 2365 DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(), 2366 getValue(I.getOperand(1)), 2367 DAG.getSrcValue(I.getOperand(1)))); 2368} 2369 2370void SelectionDAGLowering::visitVACopy(CallInst &I) { 2371 DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(), 2372 getValue(I.getOperand(1)), 2373 getValue(I.getOperand(2)), 2374 DAG.getSrcValue(I.getOperand(1)), 2375 DAG.getSrcValue(I.getOperand(2)))); 2376} 2377 2378/// TargetLowering::LowerArguments - This is the default LowerArguments 2379/// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all 2380/// targets are migrated to using FORMAL_ARGUMENTS, this hook should be 2381/// integrated into SDISel. 2382std::vector<SDOperand> 2383TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { 2384 // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node. 2385 std::vector<SDOperand> Ops; 2386 Ops.push_back(DAG.getRoot()); 2387 Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy())); 2388 Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy())); 2389 2390 // Add one result value for each formal argument. 
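// Each argument contributes one result per legal register piece: on a typical
// 32-bit target, for instance, a 64-bit integer argument expands into two i32
// result values. The extra MVT::Other value added below is the output chain.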
2391 std::vector<MVT::ValueType> RetVals; 2392 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) { 2393 MVT::ValueType VT = getValueType(I->getType()); 2394 2395 switch (getTypeAction(VT)) { 2396 default: assert(0 && "Unknown type action!"); 2397 case Legal: 2398 RetVals.push_back(VT); 2399 break; 2400 case Promote: 2401 RetVals.push_back(getTypeToTransformTo(VT)); 2402 break; 2403 case Expand: 2404 if (VT != MVT::Vector) { 2405 // If this is a large integer, it needs to be broken up into small 2406 // integers. Figure out what the destination type is and how many small 2407 // integers it turns into. 2408 MVT::ValueType NVT = getTypeToTransformTo(VT); 2409 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT); 2410 for (unsigned i = 0; i != NumVals; ++i) 2411 RetVals.push_back(NVT); 2412 } else { 2413 // Otherwise, this is a vector type. We only support legal vectors 2414 // right now. 2415 unsigned NumElems = cast<PackedType>(I->getType())->getNumElements(); 2416 const Type *EltTy = cast<PackedType>(I->getType())->getElementType(); 2417 2418 // Figure out if there is a Packed type corresponding to this Vector 2419 // type. If so, convert to the packed type. 2420 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 2421 if (TVT != MVT::Other && isTypeLegal(TVT)) { 2422 RetVals.push_back(TVT); 2423 } else { 2424 assert(0 && "Don't support illegal by-val vector arguments yet!"); 2425 } 2426 } 2427 break; 2428 } 2429 } 2430 2431 RetVals.push_back(MVT::Other); 2432 2433 // Create the node. 2434 SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS, 2435 DAG.getNodeValueTypes(RetVals), RetVals.size(), 2436 &Ops[0], Ops.size()).Val; 2437 2438 DAG.setRoot(SDOperand(Result, Result->getNumValues()-1)); 2439 2440 // Set up the return result vector. 2441 Ops.clear(); 2442 unsigned i = 0; 2443 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) { 2444 MVT::ValueType VT = getValueType(I->getType()); 2445 2446 switch (getTypeAction(VT)) { 2447 default: assert(0 && "Unknown type action!"); 2448 case Legal: 2449 Ops.push_back(SDOperand(Result, i++)); 2450 break; 2451 case Promote: { 2452 SDOperand Op(Result, i++); 2453 if (MVT::isInteger(VT)) { 2454 unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext 2455 : ISD::AssertZext; 2456 Op = DAG.getNode(AssertOp, Op.getValueType(), Op, DAG.getValueType(VT)); 2457 Op = DAG.getNode(ISD::TRUNCATE, VT, Op); 2458 } else { 2459 assert(MVT::isFloatingPoint(VT) && "Not int or FP?"); 2460 Op = DAG.getNode(ISD::FP_ROUND, VT, Op); 2461 } 2462 Ops.push_back(Op); 2463 break; 2464 } 2465 case Expand: 2466 if (VT != MVT::Vector) { 2467 // If this is a large integer, it needs to be reassembled from small 2468 // integers. Figure out what the source elt type is and how many small 2469 // integers it is. 2470 MVT::ValueType NVT = getTypeToTransformTo(VT); 2471 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT); 2472 if (NumVals == 2) { 2473 SDOperand Lo = SDOperand(Result, i++); 2474 SDOperand Hi = SDOperand(Result, i++); 2475 2476 if (!isLittleEndian()) 2477 std::swap(Lo, Hi); 2478 2479 Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi)); 2480 } else { 2481 // Value scalarized into many values. Unimp for now. 2482 assert(0 && "Cannot expand i64 -> i16 yet!"); 2483 } 2484 } else { 2485 // Otherwise, this is a vector type. We only support legal vectors 2486 // right now. 
2487 const PackedType *PTy = cast<PackedType>(I->getType()); 2488 unsigned NumElems = PTy->getNumElements(); 2489 const Type *EltTy = PTy->getElementType(); 2490 2491 // Figure out if there is a Packed type corresponding to this Vector 2492 // type. If so, convert to the packed type. 2493 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 2494 if (TVT != MVT::Other && isTypeLegal(TVT)) { 2495 SDOperand N = SDOperand(Result, i++); 2496 // Handle copies from generic vectors to registers. 2497 N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N, 2498 DAG.getConstant(NumElems, MVT::i32), 2499 DAG.getValueType(getValueType(EltTy))); 2500 Ops.push_back(N); 2501 } else { 2502 assert(0 && "Don't support illegal by-val vector arguments yet!"); 2503 abort(); 2504 } 2505 } 2506 break; 2507 } 2508 } 2509 return Ops; 2510} 2511 2512 2513/// TargetLowering::LowerCallTo - This is the default LowerCallTo 2514/// implementation, which just inserts an ISD::CALL node, which is later custom 2515/// lowered by the target to something concrete. FIXME: When all targets are 2516/// migrated to using ISD::CALL, this hook should be integrated into SDISel. 2517std::pair<SDOperand, SDOperand> 2518TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg, 2519 unsigned CallingConv, bool isTailCall, 2520 SDOperand Callee, 2521 ArgListTy &Args, SelectionDAG &DAG) { 2522 SmallVector<SDOperand, 32> Ops; 2523 Ops.push_back(Chain); // Op#0 - Chain 2524 Ops.push_back(DAG.getConstant(CallingConv, getPointerTy())); // Op#1 - CC 2525 Ops.push_back(DAG.getConstant(isVarArg, getPointerTy())); // Op#2 - VarArg 2526 Ops.push_back(DAG.getConstant(isTailCall, getPointerTy())); // Op#3 - Tail 2527 Ops.push_back(Callee); 2528 2529 // Handle all of the outgoing arguments. 2530 for (unsigned i = 0, e = Args.size(); i != e; ++i) { 2531 MVT::ValueType VT = getValueType(Args[i].second); 2532 SDOperand Op = Args[i].first; 2533 bool isSigned = Args[i].second->isSigned(); 2534 switch (getTypeAction(VT)) { 2535 default: assert(0 && "Unknown type action!"); 2536 case Legal: 2537 Ops.push_back(Op); 2538 Ops.push_back(DAG.getConstant(isSigned, MVT::i32)); 2539 break; 2540 case Promote: 2541 if (MVT::isInteger(VT)) { 2542 unsigned ExtOp = isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 2543 Op = DAG.getNode(ExtOp, getTypeToTransformTo(VT), Op); 2544 } else { 2545 assert(MVT::isFloatingPoint(VT) && "Not int or FP?"); 2546 Op = DAG.getNode(ISD::FP_EXTEND, getTypeToTransformTo(VT), Op); 2547 } 2548 Ops.push_back(Op); 2549 Ops.push_back(DAG.getConstant(isSigned, MVT::i32)); 2550 break; 2551 case Expand: 2552 if (VT != MVT::Vector) { 2553 // If this is a large integer, it needs to be broken down into small 2554 // integers. Figure out what the source elt type is and how many small 2555 // integers it is. 2556 MVT::ValueType NVT = getTypeToTransformTo(VT); 2557 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT); 2558 if (NumVals == 2) { 2559 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Op, 2560 DAG.getConstant(0, getPointerTy())); 2561 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Op, 2562 DAG.getConstant(1, getPointerTy())); 2563 if (!isLittleEndian()) 2564 std::swap(Lo, Hi); 2565 2566 Ops.push_back(Lo); 2567 Ops.push_back(DAG.getConstant(isSigned, MVT::i32)); 2568 Ops.push_back(Hi); 2569 Ops.push_back(DAG.getConstant(isSigned, MVT::i32)); 2570 } else { 2571 // Value scalarized into many values. Unimp for now. 
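// (This case would be reached, for example, by an i64 argument on a target
// whose widest legal integer type is i16, which would expand into four parts.)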
2572 assert(0 && "Cannot expand i64 -> i16 yet!"); 2573 } 2574 } else { 2575 // Otherwise, this is a vector type. We only support legal vectors 2576 // right now. 2577 const PackedType *PTy = cast<PackedType>(Args[i].second); 2578 unsigned NumElems = PTy->getNumElements(); 2579 const Type *EltTy = PTy->getElementType(); 2580 2581 // Figure out if there is a Packed type corresponding to this Vector 2582 // type. If so, convert to the packed type. 2583 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 2584 if (TVT != MVT::Other && isTypeLegal(TVT)) { 2585 // Insert a VBIT_CONVERT of the MVT::Vector type to the packed type. 2586 Op = DAG.getNode(ISD::VBIT_CONVERT, TVT, Op); 2587 Ops.push_back(Op); 2588 Ops.push_back(DAG.getConstant(isSigned, MVT::i32)); 2589 } else { 2590 assert(0 && "Don't support illegal by-val vector call args yet!"); 2591 abort(); 2592 } 2593 } 2594 break; 2595 } 2596 } 2597 2598 // Figure out the result value types. 2599 SmallVector<MVT::ValueType, 4> RetTys; 2600 2601 if (RetTy != Type::VoidTy) { 2602 MVT::ValueType VT = getValueType(RetTy); 2603 switch (getTypeAction(VT)) { 2604 default: assert(0 && "Unknown type action!"); 2605 case Legal: 2606 RetTys.push_back(VT); 2607 break; 2608 case Promote: 2609 RetTys.push_back(getTypeToTransformTo(VT)); 2610 break; 2611 case Expand: 2612 if (VT != MVT::Vector) { 2613 // If this is a large integer, it needs to be reassembled from small 2614 // integers. Figure out what the source elt type is and how many small 2615 // integers it is. 2616 MVT::ValueType NVT = getTypeToTransformTo(VT); 2617 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT); 2618 for (unsigned i = 0; i != NumVals; ++i) 2619 RetTys.push_back(NVT); 2620 } else { 2621 // Otherwise, this is a vector type. We only support legal vectors 2622 // right now. 2623 const PackedType *PTy = cast<PackedType>(RetTy); 2624 unsigned NumElems = PTy->getNumElements(); 2625 const Type *EltTy = PTy->getElementType(); 2626 2627 // Figure out if there is a Packed type corresponding to this Vector 2628 // type. If so, convert to the packed type. 2629 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 2630 if (TVT != MVT::Other && isTypeLegal(TVT)) { 2631 RetTys.push_back(TVT); 2632 } else { 2633 assert(0 && "Don't support illegal by-val vector call results yet!"); 2634 abort(); 2635 } 2636 } 2637 } 2638 } 2639 2640 RetTys.push_back(MVT::Other); // Always has a chain. 2641 2642 // Finally, create the CALL node. 2643 SDOperand Res = DAG.getNode(ISD::CALL, 2644 DAG.getVTList(&RetTys[0], RetTys.size()), 2645 &Ops[0], Ops.size()); 2646 2647 // This returns a pair of operands. The first element is the 2648 // return value for the function (if RetTy is not VoidTy). The second 2649 // element is the outgoing token chain. 2650 SDOperand ResVal; 2651 if (RetTys.size() != 1) { 2652 MVT::ValueType VT = getValueType(RetTy); 2653 if (RetTys.size() == 2) { 2654 ResVal = Res; 2655 2656 // If this value was promoted, truncate it down. 2657 if (ResVal.getValueType() != VT) { 2658 if (VT == MVT::Vector) { 2659 // Insert a VBITCONVERT to convert from the packed result type to the 2660 // MVT::Vector type. 2661 unsigned NumElems = cast<PackedType>(RetTy)->getNumElements(); 2662 const Type *EltTy = cast<PackedType>(RetTy)->getElementType(); 2663 2664 // Figure out if there is a Packed type corresponding to this Vector 2665 // type. If so, convert to the packed type. 
2666 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 2667 if (TVT != MVT::Other && isTypeLegal(TVT)) { 2668 // Insert a VBIT_CONVERT of the FORMAL_ARGUMENTS to a 2669 // "N x PTyElementVT" MVT::Vector type. 2670 ResVal = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, ResVal, 2671 DAG.getConstant(NumElems, MVT::i32), 2672 DAG.getValueType(getValueType(EltTy))); 2673 } else { 2674 abort(); 2675 } 2676 } else if (MVT::isInteger(VT)) { 2677 unsigned AssertOp = RetTy->isSigned() ? 2678 ISD::AssertSext : ISD::AssertZext; 2679 ResVal = DAG.getNode(AssertOp, ResVal.getValueType(), ResVal, 2680 DAG.getValueType(VT)); 2681 ResVal = DAG.getNode(ISD::TRUNCATE, VT, ResVal); 2682 } else { 2683 assert(MVT::isFloatingPoint(VT)); 2684 ResVal = DAG.getNode(ISD::FP_ROUND, VT, ResVal); 2685 } 2686 } 2687 } else if (RetTys.size() == 3) { 2688 ResVal = DAG.getNode(ISD::BUILD_PAIR, VT, 2689 Res.getValue(0), Res.getValue(1)); 2690 2691 } else { 2692 assert(0 && "Case not handled yet!"); 2693 } 2694 } 2695 2696 return std::make_pair(ResVal, Res.getValue(Res.Val->getNumValues()-1)); 2697} 2698 2699 2700 2701// It is always conservatively correct for llvm.returnaddress and 2702// llvm.frameaddress to return 0. 2703// 2704// FIXME: Change this to insert a FRAMEADDR/RETURNADDR node, and have that be 2705// expanded to 0 if the target wants. 2706std::pair<SDOperand, SDOperand> 2707TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, 2708 unsigned Depth, SelectionDAG &DAG) { 2709 return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain); 2710} 2711 2712SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 2713 assert(0 && "LowerOperation not implemented for this target!"); 2714 abort(); 2715 return SDOperand(); 2716} 2717 2718SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op, 2719 SelectionDAG &DAG) { 2720 assert(0 && "CustomPromoteOperation not implemented for this target!"); 2721 abort(); 2722 return SDOperand(); 2723} 2724 2725void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) { 2726 unsigned Depth = (unsigned)cast<ConstantUInt>(I.getOperand(1))->getValue(); 2727 std::pair<SDOperand,SDOperand> Result = 2728 TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG); 2729 setValue(&I, Result.first); 2730 DAG.setRoot(Result.second); 2731} 2732 2733/// getMemsetValue - Vectorized representation of the memset value 2734/// operand. 2735static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT, 2736 SelectionDAG &DAG) { 2737 MVT::ValueType CurVT = VT; 2738 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 2739 uint64_t Val = C->getValue() & 255; 2740 unsigned Shift = 8; 2741 while (CurVT != MVT::i8) { 2742 Val = (Val << Shift) | Val; 2743 Shift <<= 1; 2744 CurVT = (MVT::ValueType)((unsigned)CurVT - 1); 2745 } 2746 return DAG.getConstant(Val, VT); 2747 } else { 2748 Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value); 2749 unsigned Shift = 8; 2750 while (CurVT != MVT::i8) { 2751 Value = 2752 DAG.getNode(ISD::OR, VT, 2753 DAG.getNode(ISD::SHL, VT, Value, 2754 DAG.getConstant(Shift, MVT::i8)), Value); 2755 Shift <<= 1; 2756 CurVT = (MVT::ValueType)((unsigned)CurVT - 1); 2757 } 2758 2759 return Value; 2760 } 2761} 2762 2763/// getMemsetStringVal - Similar to getMemsetValue. Except this is only 2764/// used when a memcpy is turned into a memset when the source is a constant 2765/// string ptr. 
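/// For example, with VT == MVT::i32, Str == "abcd" and Offset == 0 on a
/// little-endian target, the function below returns the constant 0x64636261
/// ('a' in the low byte), matching the bytes' layout in memory.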
2766 static SDOperand getMemsetStringVal(MVT::ValueType VT,
2767 SelectionDAG &DAG, TargetLowering &TLI,
2768 std::string &Str, unsigned Offset) {
2769 MVT::ValueType CurVT = VT;
2770 uint64_t Val = 0;
2771 unsigned MSB = getSizeInBits(VT) / 8;
2772 if (TLI.isLittleEndian())
2773 Offset = Offset + MSB - 1;
2774 for (unsigned i = 0; i != MSB; ++i) {
2775 Val = (Val << 8) | Str[Offset];
2776 Offset += TLI.isLittleEndian() ? -1 : 1;
2777 }
2778 return DAG.getConstant(Val, VT);
2779 }
2780
2781 /// getMemBasePlusOffset - Returns a node computing Base plus the given
2782 /// constant byte Offset, for use as a memory operand address.
2783 static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset,
2784 SelectionDAG &DAG, TargetLowering &TLI) {
2785 MVT::ValueType VT = Base.getValueType();
2786 return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT));
2787 }
2788
2789 /// MeetsMaxMemopRequirement - Determines if the number of memory ops required
2790 /// to replace the memset / memcpy is below the threshold. It also returns the
2791 /// types of the sequence of memory ops to perform memset / memcpy.
2792 static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
2793 unsigned Limit, uint64_t Size,
2794 unsigned Align, TargetLowering &TLI) {
2795 MVT::ValueType VT;
2796
2797 if (TLI.allowsUnalignedMemoryAccesses()) {
2798 VT = MVT::i64;
2799 } else {
2800 switch (Align & 7) {
2801 case 0:
2802 VT = MVT::i64;
2803 break;
2804 case 4:
2805 VT = MVT::i32;
2806 break;
2807 case 2:
2808 VT = MVT::i16;
2809 break;
2810 default:
2811 VT = MVT::i8;
2812 break;
2813 }
2814 }
2815
2816 MVT::ValueType LVT = MVT::i64;
2817 while (!TLI.isTypeLegal(LVT))
2818 LVT = (MVT::ValueType)((unsigned)LVT - 1);
2819 assert(MVT::isInteger(LVT));
2820
2821 if (VT > LVT)
2822 VT = LVT;
2823
2824 unsigned NumMemOps = 0;
2825 while (Size != 0) {
2826 unsigned VTSize = getSizeInBits(VT) / 8;
2827 while (VTSize > Size) {
2828 VT = (MVT::ValueType)((unsigned)VT - 1);
2829 VTSize >>= 1;
2830 }
2831 assert(MVT::isInteger(VT));
2832
2833 if (++NumMemOps > Limit)
2834 return false;
2835 MemOps.push_back(VT);
2836 Size -= VTSize;
2837 }
2838
2839 return true;
2840 }
2841
2842 void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) {
2843 SDOperand Op1 = getValue(I.getOperand(1));
2844 SDOperand Op2 = getValue(I.getOperand(2));
2845 SDOperand Op3 = getValue(I.getOperand(3));
2846 SDOperand Op4 = getValue(I.getOperand(4));
2847 unsigned Align = (unsigned)cast<ConstantSDNode>(Op4)->getValue();
2848 if (Align == 0) Align = 1;
2849
2850 if (ConstantSDNode *Size = dyn_cast<ConstantSDNode>(Op3)) {
2851 std::vector<MVT::ValueType> MemOps;
2852
2853 // Expand memset / memcpy to a series of load / store ops
2854 // if the size operand falls below a certain threshold.
2855 SmallVector<SDOperand, 8> OutChains;
2856 switch (Op) {
2857 default: break; // Do nothing for now.
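// For example, an 11-byte memset with 4-byte alignment, on a target that does
// not allow unaligned accesses and whose widest legal integer type is i32,
// is expanded into an i32, i32, i16, i8 store sequence (assuming the
// MaxStoresPerMemset limit is at least four).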
2857 case ISD::MEMSET: { 2858 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(), 2859 Size->getValue(), Align, TLI)) { 2860 unsigned NumMemOps = MemOps.size(); 2861 unsigned Offset = 0; 2862 for (unsigned i = 0; i < NumMemOps; i++) { 2863 MVT::ValueType VT = MemOps[i]; 2864 unsigned VTSize = getSizeInBits(VT) / 8; 2865 SDOperand Value = getMemsetValue(Op2, VT, DAG); 2866 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, getRoot(), 2867 Value, 2868 getMemBasePlusOffset(Op1, Offset, DAG, TLI), 2869 DAG.getSrcValue(I.getOperand(1), Offset)); 2870 OutChains.push_back(Store); 2871 Offset += VTSize; 2872 } 2873 } 2874 break; 2875 } 2876 case ISD::MEMCPY: { 2877 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemcpy(), 2878 Size->getValue(), Align, TLI)) { 2879 unsigned NumMemOps = MemOps.size(); 2880 unsigned SrcOff = 0, DstOff = 0, SrcDelta = 0; 2881 GlobalAddressSDNode *G = NULL; 2882 std::string Str; 2883 bool CopyFromStr = false; 2884 2885 if (Op2.getOpcode() == ISD::GlobalAddress) 2886 G = cast<GlobalAddressSDNode>(Op2); 2887 else if (Op2.getOpcode() == ISD::ADD && 2888 Op2.getOperand(0).getOpcode() == ISD::GlobalAddress && 2889 Op2.getOperand(1).getOpcode() == ISD::Constant) { 2890 G = cast<GlobalAddressSDNode>(Op2.getOperand(0)); 2891 SrcDelta = cast<ConstantSDNode>(Op2.getOperand(1))->getValue(); 2892 } 2893 if (G) { 2894 GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal()); 2895 if (GV) { 2896 Str = GV->getStringValue(false); 2897 if (!Str.empty()) { 2898 CopyFromStr = true; 2899 SrcOff += SrcDelta; 2900 } 2901 } 2902 } 2903 2904 for (unsigned i = 0; i < NumMemOps; i++) { 2905 MVT::ValueType VT = MemOps[i]; 2906 unsigned VTSize = getSizeInBits(VT) / 8; 2907 SDOperand Value, Chain, Store; 2908 2909 if (CopyFromStr) { 2910 Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff); 2911 Chain = getRoot(); 2912 Store = 2913 DAG.getNode(ISD::STORE, MVT::Other, Chain, Value, 2914 getMemBasePlusOffset(Op1, DstOff, DAG, TLI), 2915 DAG.getSrcValue(I.getOperand(1), DstOff)); 2916 } else { 2917 Value = DAG.getLoad(VT, getRoot(), 2918 getMemBasePlusOffset(Op2, SrcOff, DAG, TLI), 2919 DAG.getSrcValue(I.getOperand(2), SrcOff)); 2920 Chain = Value.getValue(1); 2921 Store = 2922 DAG.getNode(ISD::STORE, MVT::Other, Chain, Value, 2923 getMemBasePlusOffset(Op1, DstOff, DAG, TLI), 2924 DAG.getSrcValue(I.getOperand(1), DstOff)); 2925 } 2926 OutChains.push_back(Store); 2927 SrcOff += VTSize; 2928 DstOff += VTSize; 2929 } 2930 } 2931 break; 2932 } 2933 } 2934 2935 if (!OutChains.empty()) { 2936 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, 2937 &OutChains[0], OutChains.size())); 2938 return; 2939 } 2940 } 2941 2942 DAG.setRoot(DAG.getNode(Op, MVT::Other, getRoot(), Op1, Op2, Op3, Op4)); 2943} 2944 2945//===----------------------------------------------------------------------===// 2946// SelectionDAGISel code 2947//===----------------------------------------------------------------------===// 2948 2949unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) { 2950 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT)); 2951} 2952 2953void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const { 2954 // FIXME: we only modify the CFG to split critical edges. This 2955 // updates dom and loop info. 2956} 2957 2958 2959/// OptimizeNoopCopyExpression - We have determined that the specified cast 2960/// instruction is a noop copy (e.g. it's casting from one pointer type to 2961/// another, int->uint, or int->sbyte on PPC. 
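/// Sinking a copy of such a cast into each block that uses it keeps the cast
/// next to its users, so the per-block instruction selector can fold it away
/// instead of having a cross-block value of the wrong type live into the block.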
2962/// 2963/// Return true if any changes are made. 2964static bool OptimizeNoopCopyExpression(CastInst *CI) { 2965 BasicBlock *DefBB = CI->getParent(); 2966 2967 /// InsertedCasts - Only insert a cast in each block once. 2968 std::map<BasicBlock*, CastInst*> InsertedCasts; 2969 2970 bool MadeChange = false; 2971 for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end(); 2972 UI != E; ) { 2973 Use &TheUse = UI.getUse(); 2974 Instruction *User = cast<Instruction>(*UI); 2975 2976 // Figure out which BB this cast is used in. For PHI's this is the 2977 // appropriate predecessor block. 2978 BasicBlock *UserBB = User->getParent(); 2979 if (PHINode *PN = dyn_cast<PHINode>(User)) { 2980 unsigned OpVal = UI.getOperandNo()/2; 2981 UserBB = PN->getIncomingBlock(OpVal); 2982 } 2983 2984 // Preincrement use iterator so we don't invalidate it. 2985 ++UI; 2986 2987 // If this user is in the same block as the cast, don't change the cast. 2988 if (UserBB == DefBB) continue; 2989 2990 // If we have already inserted a cast into this block, use it. 2991 CastInst *&InsertedCast = InsertedCasts[UserBB]; 2992 2993 if (!InsertedCast) { 2994 BasicBlock::iterator InsertPt = UserBB->begin(); 2995 while (isa<PHINode>(InsertPt)) ++InsertPt; 2996 2997 InsertedCast = 2998 new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt); 2999 MadeChange = true; 3000 } 3001 3002 // Replace a use of the cast with a use of the new casat. 3003 TheUse = InsertedCast; 3004 } 3005 3006 // If we removed all uses, nuke the cast. 3007 if (CI->use_empty()) 3008 CI->eraseFromParent(); 3009 3010 return MadeChange; 3011} 3012 3013/// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset, 3014/// casting to the type of GEPI. 3015static Instruction *InsertGEPComputeCode(Instruction *&V, BasicBlock *BB, 3016 Instruction *GEPI, Value *Ptr, 3017 Value *PtrOffset) { 3018 if (V) return V; // Already computed. 3019 3020 BasicBlock::iterator InsertPt; 3021 if (BB == GEPI->getParent()) { 3022 // If insert into the GEP's block, insert right after the GEP. 3023 InsertPt = GEPI; 3024 ++InsertPt; 3025 } else { 3026 // Otherwise, insert at the top of BB, after any PHI nodes 3027 InsertPt = BB->begin(); 3028 while (isa<PHINode>(InsertPt)) ++InsertPt; 3029 } 3030 3031 // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into 3032 // BB so that there is only one value live across basic blocks (the cast 3033 // operand). 3034 if (CastInst *CI = dyn_cast<CastInst>(Ptr)) 3035 if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType())) 3036 Ptr = new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt); 3037 3038 // Add the offset, cast it to the right type. 3039 Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt); 3040 return V = new CastInst(Ptr, GEPI->getType(), "", InsertPt); 3041} 3042 3043/// ReplaceUsesOfGEPInst - Replace all uses of RepPtr with inserted code to 3044/// compute its value. The RepPtr value can be computed with Ptr+PtrOffset. One 3045/// trivial way of doing this would be to evaluate Ptr+PtrOffset in RepPtr's 3046/// block, then ReplaceAllUsesWith'ing everything. However, we would prefer to 3047/// sink PtrOffset into user blocks where doing so will likely allow us to fold 3048/// the constant add into a load or store instruction. Additionally, if a user 3049/// is a pointer-pointer cast, we look through it to find its users. 
3050static void ReplaceUsesOfGEPInst(Instruction *RepPtr, Value *Ptr, 3051 Constant *PtrOffset, BasicBlock *DefBB, 3052 GetElementPtrInst *GEPI, 3053 std::map<BasicBlock*,Instruction*> &InsertedExprs) { 3054 while (!RepPtr->use_empty()) { 3055 Instruction *User = cast<Instruction>(RepPtr->use_back()); 3056 3057 // If the user is a Pointer-Pointer cast, recurse. 3058 if (isa<CastInst>(User) && isa<PointerType>(User->getType())) { 3059 ReplaceUsesOfGEPInst(User, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs); 3060 3061 // Drop the use of RepPtr. The cast is dead. Don't delete it now, else we 3062 // could invalidate an iterator. 3063 User->setOperand(0, UndefValue::get(RepPtr->getType())); 3064 continue; 3065 } 3066 3067 // If this is a load of the pointer, or a store through the pointer, emit 3068 // the increment into the load/store block. 3069 Instruction *NewVal; 3070 if (isa<LoadInst>(User) || 3071 (isa<StoreInst>(User) && User->getOperand(0) != RepPtr)) { 3072 NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()], 3073 User->getParent(), GEPI, 3074 Ptr, PtrOffset); 3075 } else { 3076 // If this use is not foldable into the addressing mode, use a version 3077 // emitted in the GEP block. 3078 NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI, 3079 Ptr, PtrOffset); 3080 } 3081 3082 if (GEPI->getType() != RepPtr->getType()) { 3083 BasicBlock::iterator IP = NewVal; 3084 ++IP; 3085 NewVal = new CastInst(NewVal, RepPtr->getType(), "", IP); 3086 } 3087 User->replaceUsesOfWith(RepPtr, NewVal); 3088 } 3089} 3090 3091 3092/// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction 3093/// selection, we want to be a bit careful about some things. In particular, if 3094/// we have a GEP instruction that is used in a different block than it is 3095/// defined, the addressing expression of the GEP cannot be folded into loads or 3096/// stores that use it. In this case, decompose the GEP and move constant 3097/// indices into blocks that use it. 3098static bool OptimizeGEPExpression(GetElementPtrInst *GEPI, 3099 const TargetData *TD) { 3100 // If this GEP is only used inside the block it is defined in, there is no 3101 // need to rewrite it. 3102 bool isUsedOutsideDefBB = false; 3103 BasicBlock *DefBB = GEPI->getParent(); 3104 for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end(); 3105 UI != E; ++UI) { 3106 if (cast<Instruction>(*UI)->getParent() != DefBB) { 3107 isUsedOutsideDefBB = true; 3108 break; 3109 } 3110 } 3111 if (!isUsedOutsideDefBB) return false; 3112 3113 // If this GEP has no non-zero constant indices, there is nothing we can do, 3114 // ignore it. 3115 bool hasConstantIndex = false; 3116 bool hasVariableIndex = false; 3117 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1, 3118 E = GEPI->op_end(); OI != E; ++OI) { 3119 if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI)) { 3120 if (CI->getRawValue()) { 3121 hasConstantIndex = true; 3122 break; 3123 } 3124 } else { 3125 hasVariableIndex = true; 3126 } 3127 } 3128 3129 // If this is a "GEP X, 0, 0, 0", turn this into a cast. 3130 if (!hasConstantIndex && !hasVariableIndex) { 3131 Value *NC = new CastInst(GEPI->getOperand(0), GEPI->getType(), 3132 GEPI->getName(), GEPI); 3133 GEPI->replaceAllUsesWith(NC); 3134 GEPI->eraseFromParent(); 3135 return true; 3136 } 3137 3138 // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses. 
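// Bail out when there is no constant offset to sink and the base is not an alloca; otherwise decompose the GEP below.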
3139 if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0))) 3140 return false; 3141 3142 // Otherwise, decompose the GEP instruction into multiplies and adds. Sum the 3143 // constant offset (which we now know is non-zero) and deal with it later. 3144 uint64_t ConstantOffset = 0; 3145 const Type *UIntPtrTy = TD->getIntPtrType(); 3146 Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI); 3147 const Type *Ty = GEPI->getOperand(0)->getType(); 3148 3149 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1, 3150 E = GEPI->op_end(); OI != E; ++OI) { 3151 Value *Idx = *OI; 3152 if (const StructType *StTy = dyn_cast<StructType>(Ty)) { 3153 unsigned Field = cast<ConstantUInt>(Idx)->getValue(); 3154 if (Field) 3155 ConstantOffset += TD->getStructLayout(StTy)->MemberOffsets[Field]; 3156 Ty = StTy->getElementType(Field); 3157 } else { 3158 Ty = cast<SequentialType>(Ty)->getElementType(); 3159 3160 // Handle constant subscripts. 3161 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) { 3162 if (CI->getRawValue() == 0) continue; 3163 3164 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI)) 3165 ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CSI->getValue(); 3166 else 3167 ConstantOffset+=TD->getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue(); 3168 continue; 3169 } 3170 3171 // Ptr = Ptr + Idx * ElementSize; 3172 3173 // Cast Idx to UIntPtrTy if needed. 3174 Idx = new CastInst(Idx, UIntPtrTy, "", GEPI); 3175 3176 uint64_t ElementSize = TD->getTypeSize(Ty); 3177 // Mask off bits that should not be set. 3178 ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits()); 3179 Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize); 3180 3181 // Multiply by the element size and add to the base. 3182 Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI); 3183 Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI); 3184 } 3185 } 3186 3187 // Make sure that the offset fits in uintptr_t. 3188 ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits()); 3189 Constant *PtrOffset = ConstantUInt::get(UIntPtrTy, ConstantOffset); 3190 3191 // Okay, we have now emitted all of the variable index parts to the BB that 3192 // the GEP is defined in. Loop over all of the using instructions, inserting 3193 // an "add Ptr, ConstantOffset" into each block that uses it and update the 3194 // instruction to use the newly computed value, making GEPI dead. When the 3195 // user is a load or store instruction address, we emit the add into the user 3196 // block, otherwise we use a canonical version right next to the gep (these 3197 // won't be foldable as addresses, so we might as well share the computation). 3198 3199 std::map<BasicBlock*,Instruction*> InsertedExprs; 3200 ReplaceUsesOfGEPInst(GEPI, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs); 3201 3202 // Finally, the GEP is dead, remove it. 3203 GEPI->eraseFromParent(); 3204 3205 return true; 3206} 3207 3208/// SplitCritEdgesForPHIConstants - If this block has any PHI nodes with 3209/// constant operands, and if any of the edges feeding the PHI node are 3210/// critical, split them so that the assignments of a constant to a register 3211/// will not be executed on a path that isn't relevant. 
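/// For example, if a PHI in this block takes a constant from a predecessor that has several successors, that edge is split so the copy of the constant into a vreg lands in the new block and only runs when control actually reaches this block.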
3212 void SelectionDAGISel::SplitCritEdgesForPHIConstants(BasicBlock *BB) { 3213 PHINode *PN; 3214 BasicBlock::iterator BBI = BB->begin(); 3215 while ((PN = dyn_cast<PHINode>(BBI++))) { 3216 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 3217 if (isa<Constant>(PN->getIncomingValue(i))) 3218 SplitCriticalEdge(PN->getIncomingBlock(i), BB); 3219 } 3220} 3221 3222 3223 bool SelectionDAGISel::runOnFunction(Function &Fn) { 3224 MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine()); 3225 RegMap = MF.getSSARegMap(); 3226 DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n"); 3227 3228 // First, split all critical edges for PHI nodes with incoming values that are 3229 // constants; this way the load of the constant into a vreg will not be placed 3230 // into MBBs that are used some other way. 3231 // 3232 // In this pass we also look for GEP and cast instructions that are used 3233 // across basic blocks and rewrite them to improve basic-block-at-a-time 3234 // selection. 3235 // 3236 // 3237 bool MadeChange = true; 3238 while (MadeChange) { 3239 MadeChange = false; 3240 for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) { 3241 // If this block has any PHI nodes with constant operands, and if any of the 3242 // edges feeding the PHI node are critical, split them. 3243 if (isa<PHINode>(BB->begin())) 3244 SplitCritEdgesForPHIConstants(BB); 3245 3246 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) { 3247 Instruction *I = BBI++; 3248 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { 3249 MadeChange |= OptimizeGEPExpression(GEPI, TLI.getTargetData()); 3250 } else if (CastInst *CI = dyn_cast<CastInst>(I)) { 3251 // If the source of the cast is a constant, then this should have 3252 // already been constant folded. The only reason NOT to constant fold 3253 // it is if something (e.g. LSR) was careful to place the constant 3254 // evaluation in a block other than the one that uses it (e.g. to hoist 3255 // the address of globals out of a loop). If this is the case, we don't 3256 // want to forward-subst the cast. 3257 if (isa<Constant>(CI->getOperand(0))) 3258 continue; 3259 3260 // If this is a noop copy, sink it into user blocks to reduce the number 3261 // of virtual registers that must be created and coalesced. 3262 MVT::ValueType SrcVT = TLI.getValueType(CI->getOperand(0)->getType()); 3263 MVT::ValueType DstVT = TLI.getValueType(CI->getType()); 3264 3265 // If this is an fp<->int conversion, it is not a noop copy. 3266 if (MVT::isInteger(SrcVT) != MVT::isInteger(DstVT)) 3267 continue; 3268 3269 // If this is an extension, it will be a zero or sign extension, which 3270 // isn't a noop. 3271 if (SrcVT < DstVT) continue; 3272 3273 // If these values will be promoted, find out what they will be promoted 3274 // to. This helps us consider truncates on PPC as noop copies when they 3275 // are. 3276 if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote) 3277 SrcVT = TLI.getTypeToTransformTo(SrcVT); 3278 if (TLI.getTypeAction(DstVT) == TargetLowering::Promote) 3279 DstVT = TLI.getTypeToTransformTo(DstVT); 3280 3281 // If, after promotion, these are the same types, this is a noop copy.
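// (e.g. a cast from int to sbyte on PPC: i8 is promoted to i32, so after promotion both sides are i32 and the truncate is just a copy)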
3282 if (SrcVT == DstVT) 3283 MadeChange |= OptimizeNoopCopyExpression(CI); 3284 } 3285 } 3286 } 3287 } 3288 3289 FunctionLoweringInfo FuncInfo(TLI, Fn, MF); 3290 3291 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) 3292 SelectBasicBlock(I, MF, FuncInfo); 3293 3294 return true; 3295} 3296 3297 3298SDOperand SelectionDAGISel:: 3299CopyValueToVirtualRegister(SelectionDAGLowering &SDL, Value *V, unsigned Reg) { 3300 SDOperand Op = SDL.getValue(V); 3301 assert((Op.getOpcode() != ISD::CopyFromReg || 3302 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) && 3303 "Copy from a reg to the same reg!"); 3304 3305 // If this type is not legal, we must make sure to not create an invalid 3306 // register use. 3307 MVT::ValueType SrcVT = Op.getValueType(); 3308 MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT); 3309 SelectionDAG &DAG = SDL.DAG; 3310 if (SrcVT == DestVT) { 3311 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op); 3312 } else if (SrcVT == MVT::Vector) { 3313 // Handle copies from generic vectors to registers. 3314 MVT::ValueType PTyElementVT, PTyLegalElementVT; 3315 unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(V->getType()), 3316 PTyElementVT, PTyLegalElementVT); 3317 3318 // Insert a VBIT_CONVERT of the input vector to a "N x PTyElementVT" 3319 // MVT::Vector type. 3320 Op = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Op, 3321 DAG.getConstant(NE, MVT::i32), 3322 DAG.getValueType(PTyElementVT)); 3323 3324 // Loop over all of the elements of the resultant vector, 3325 // VEXTRACT_VECTOR_ELT'ing them, converting them to PTyLegalElementVT, then 3326 // copying them into output registers. 3327 SmallVector<SDOperand, 8> OutChains; 3328 SDOperand Root = SDL.getRoot(); 3329 for (unsigned i = 0; i != NE; ++i) { 3330 SDOperand Elt = DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, PTyElementVT, 3331 Op, DAG.getConstant(i, TLI.getPointerTy())); 3332 if (PTyElementVT == PTyLegalElementVT) { 3333 // Elements are legal. 3334 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt)); 3335 } else if (PTyLegalElementVT > PTyElementVT) { 3336 // Elements are promoted. 3337 if (MVT::isFloatingPoint(PTyLegalElementVT)) 3338 Elt = DAG.getNode(ISD::FP_EXTEND, PTyLegalElementVT, Elt); 3339 else 3340 Elt = DAG.getNode(ISD::ANY_EXTEND, PTyLegalElementVT, Elt); 3341 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt)); 3342 } else { 3343 // Elements are expanded. 3344 // The src value is expanded into multiple registers. 3345 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT, 3346 Elt, DAG.getConstant(0, TLI.getPointerTy())); 3347 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT, 3348 Elt, DAG.getConstant(1, TLI.getPointerTy())); 3349 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Lo)); 3350 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Hi)); 3351 } 3352 } 3353 return DAG.getNode(ISD::TokenFactor, MVT::Other, 3354 &OutChains[0], OutChains.size()); 3355 } else if (SrcVT < DestVT) { 3356 // The src value is promoted to the register. 3357 if (MVT::isFloatingPoint(SrcVT)) 3358 Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op); 3359 else 3360 Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op); 3361 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op); 3362 } else { 3363 // The src value is expanded into multiple registers. 
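// e.g. an i64 value on a 32-bit target: EXTRACT_ELEMENT 0 and 1 pull out the low and high halves, which are copied into two consecutive virtual registers.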
3364 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT, 3365 Op, DAG.getConstant(0, TLI.getPointerTy())); 3366 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT, 3367 Op, DAG.getConstant(1, TLI.getPointerTy())); 3368 Op = DAG.getCopyToReg(SDL.getRoot(), Reg, Lo); 3369 return DAG.getCopyToReg(Op, Reg+1, Hi); 3370 } 3371} 3372 3373 void SelectionDAGISel:: 3374 LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL, 3375 std::vector<SDOperand> &UnorderedChains) { 3376 // If this is the entry block, emit arguments. 3377 Function &F = *BB->getParent(); 3378 FunctionLoweringInfo &FuncInfo = SDL.FuncInfo; 3379 SDOperand OldRoot = SDL.DAG.getRoot(); 3380 std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG); 3381 3382 unsigned a = 0; 3383 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end(); 3384 AI != E; ++AI, ++a) 3385 if (!AI->use_empty()) { 3386 SDL.setValue(AI, Args[a]); 3387 3388 // If this argument is live outside of the entry block, insert a copy from 3389 // wherever we got it to the vreg that other BB's will reference it as. 3390 if (FuncInfo.ValueMap.count(AI)) { 3391 SDOperand Copy = 3392 CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]); 3393 UnorderedChains.push_back(Copy); 3394 } 3395 } 3396 3397 // Finally, if the target has anything special to do, allow it to do so. 3398 // FIXME: this should insert code into the DAG! 3399 EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction()); 3400} 3401 3402 void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB, 3403 std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate, 3404 FunctionLoweringInfo &FuncInfo) { 3405 SelectionDAGLowering SDL(DAG, TLI, FuncInfo); 3406 3407 std::vector<SDOperand> UnorderedChains; 3408 3409 // Lower any arguments needed in this block if this is the entry block. 3410 if (LLVMBB == &LLVMBB->getParent()->front()) 3411 LowerArguments(LLVMBB, SDL, UnorderedChains); 3412 3413 BB = FuncInfo.MBBMap[LLVMBB]; 3414 SDL.setCurrentBasicBlock(BB); 3415 3416 // Lower all of the non-terminator instructions. 3417 for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end(); 3418 I != E; ++I) 3419 SDL.visit(*I); 3420 3421 // Ensure that all instructions which are used outside of their defining 3422 // blocks are available as virtual registers. 3423 for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I) 3424 if (!I->use_empty() && !isa<PHINode>(I)) { 3425 std::map<const Value*, unsigned>::iterator VMI =FuncInfo.ValueMap.find(I); 3426 if (VMI != FuncInfo.ValueMap.end()) 3427 UnorderedChains.push_back( 3428 CopyValueToVirtualRegister(SDL, I, VMI->second)); 3429 } 3430 3431 // Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to 3432 // ensure constants are generated when needed. Remember the virtual registers 3433 // that need to be added to the Machine PHI nodes as input. We cannot just 3434 // directly add them, because expansion might result in multiple MBB's for one 3435 // BB. As such, the start of the BB might correspond to a different MBB than 3436 // the end. 3437 // 3438 3439 // Emit constants only once even if used by multiple PHI nodes. 3440 std::map<Constant*, unsigned> ConstantsOut; 3441 3442 // Check successor blocks' PHI nodes that expect a constant to be available from 3443 // this block.
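// For each successor that begins with PHI nodes, copy this block's incoming value into a vreg (materializing constants as needed) and record the vreg so the machine PHI operand can be filled in once the block has been selected.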
3444 TerminatorInst *TI = LLVMBB->getTerminator(); 3445 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) { 3446 BasicBlock *SuccBB = TI->getSuccessor(succ); 3447 if (!isa<PHINode>(SuccBB->begin())) continue; 3448 3449 MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin(); 3450 PHINode *PN; 3451 3452 // At this point we know that there is a 1-1 correspondence between LLVM PHI 3453 // nodes and Machine PHI nodes, but the incoming operands have not been 3454 // emitted yet. 3455 for (BasicBlock::iterator I = SuccBB->begin(); 3456 (PN = dyn_cast<PHINode>(I)); ++I) 3457 if (!PN->use_empty()) { 3458 unsigned Reg; 3459 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB); 3460 if (Constant *C = dyn_cast<Constant>(PHIOp)) { 3461 unsigned &RegOut = ConstantsOut[C]; 3462 if (RegOut == 0) { 3463 RegOut = FuncInfo.CreateRegForValue(C); 3464 UnorderedChains.push_back( 3465 CopyValueToVirtualRegister(SDL, C, RegOut)); 3466 } 3467 Reg = RegOut; 3468 } else { 3469 Reg = FuncInfo.ValueMap[PHIOp]; 3470 if (Reg == 0) { 3471 assert(isa<AllocaInst>(PHIOp) && 3472 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) && 3473 "Didn't codegen value into a register!??"); 3474 Reg = FuncInfo.CreateRegForValue(PHIOp); 3475 UnorderedChains.push_back( 3476 CopyValueToVirtualRegister(SDL, PHIOp, Reg)); 3477 } 3478 } 3479 3480 // Remember that this register needs to be added to the machine PHI node as 3481 // the input for this MBB. 3482 MVT::ValueType VT = TLI.getValueType(PN->getType()); 3483 unsigned NumElements; 3484 if (VT != MVT::Vector) 3485 NumElements = TLI.getNumElements(VT); 3486 else { 3487 MVT::ValueType VT1,VT2; 3488 NumElements = 3489 TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()), 3490 VT1, VT2); 3491 } 3492 for (unsigned i = 0, e = NumElements; i != e; ++i) 3493 PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i)); 3494 } 3495 } 3496 ConstantsOut.clear(); 3497 3498 // Turn all of the unordered chains into one factored node. 3499 if (!UnorderedChains.empty()) { 3500 SDOperand Root = SDL.getRoot(); 3501 if (Root.getOpcode() != ISD::EntryToken) { 3502 unsigned i = 0, e = UnorderedChains.size(); 3503 for (; i != e; ++i) { 3504 assert(UnorderedChains[i].Val->getNumOperands() > 1); 3505 if (UnorderedChains[i].Val->getOperand(0) == Root) 3506 break; // Don't add the root if we already indirectly depend on it. 3507 } 3508 3509 if (i == e) 3510 UnorderedChains.push_back(Root); 3511 } 3512 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, 3513 &UnorderedChains[0], UnorderedChains.size())); 3514 } 3515 3516 // Lower the terminator after the copies are emitted. 3517 SDL.visit(*LLVMBB->getTerminator()); 3518 3519 // Copy over any CaseBlock records that may now exist due to SwitchInst 3520 // lowering, as well as any jump table information. 3521 SwitchCases.clear(); 3522 SwitchCases = SDL.SwitchCases; 3523 JT = SDL.JT; 3524 3525 // Make sure the root of the DAG is up-to-date. 3526 DAG.setRoot(SDL.getRoot()); 3527} 3528 3529 void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) { 3530 // Run the DAG combiner in pre-legalize mode. 3531 DAG.Combine(false); 3532 3533 DEBUG(std::cerr << "Lowered selection DAG:\n"); 3534 DEBUG(DAG.dump()); 3535 3536 // Second step, hack on the DAG until it only uses operations and types that 3537 // the target supports. 3538 DAG.Legalize(); 3539 3540 DEBUG(std::cerr << "Legalized selection DAG:\n"); 3541 DEBUG(DAG.dump()); 3542 3543 // Run the DAG combiner in post-legalize mode.
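// (the boolean argument tells the combiner whether legalization has already run)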
3544 DAG.Combine(true); 3545 3546 if (ViewISelDAGs) DAG.viewGraph(); 3547 3548 // Third, instruction select all of the operations to machine code, adding the 3549 // code to the MachineBasicBlock. 3550 InstructionSelectBasicBlock(DAG); 3551 3552 DEBUG(std::cerr << "Selected machine code:\n"); 3553 DEBUG(BB->dump()); 3554} 3555 3556void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF, 3557 FunctionLoweringInfo &FuncInfo) { 3558 std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate; 3559 { 3560 SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>()); 3561 CurDAG = &DAG; 3562 3563 // First step, lower LLVM code to some DAG. This DAG may use operations and 3564 // types that are not supported by the target. 3565 BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo); 3566 3567 // Second step, emit the lowered DAG as machine code. 3568 CodeGenAndEmitDAG(DAG); 3569 } 3570 3571 // Next, now that we know what the last MBB the LLVM BB expanded is, update 3572 // PHI nodes in successors. 3573 if (SwitchCases.empty() && JT.Reg == 0) { 3574 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) { 3575 MachineInstr *PHI = PHINodesToUpdate[i].first; 3576 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 3577 "This is not a machine PHI node that we are updating!"); 3578 PHI->addRegOperand(PHINodesToUpdate[i].second, false); 3579 PHI->addMachineBasicBlockOperand(BB); 3580 } 3581 return; 3582 } 3583 3584 // If the JumpTable record is filled in, then we need to emit a jump table. 3585 // Updating the PHI nodes is tricky in this case, since we need to determine 3586 // whether the PHI is a successor of the range check MBB or the jump table MBB 3587 if (JT.Reg) { 3588 assert(SwitchCases.empty() && "Cannot have jump table and lowered switch"); 3589 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>()); 3590 CurDAG = &SDAG; 3591 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo); 3592 MachineBasicBlock *RangeBB = BB; 3593 // Set the current basic block to the mbb we wish to insert the code into 3594 BB = JT.MBB; 3595 SDL.setCurrentBasicBlock(BB); 3596 // Emit the code 3597 SDL.visitJumpTable(JT); 3598 SDAG.setRoot(SDL.getRoot()); 3599 CodeGenAndEmitDAG(SDAG); 3600 // Update PHI Nodes 3601 for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) { 3602 MachineInstr *PHI = PHINodesToUpdate[pi].first; 3603 MachineBasicBlock *PHIBB = PHI->getParent(); 3604 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 3605 "This is not a machine PHI node that we are updating!"); 3606 if (PHIBB == JT.Default) { 3607 PHI->addRegOperand(PHINodesToUpdate[pi].second, false); 3608 PHI->addMachineBasicBlockOperand(RangeBB); 3609 } 3610 if (BB->succ_end() != std::find(BB->succ_begin(),BB->succ_end(), PHIBB)) { 3611 PHI->addRegOperand(PHINodesToUpdate[pi].second, false); 3612 PHI->addMachineBasicBlockOperand(BB); 3613 } 3614 } 3615 return; 3616 } 3617 3618 // If we generated any switch lowering information, build and codegen any 3619 // additional DAGs necessary. 
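// Each SwitchCases entry names the block (ThisBB) that still needs its compare-and-branch emitted, plus the successor blocks (LHSBB/RHSBB) whose PHI nodes must be updated as if the edge came from the original switch block.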
3620 for (unsigned i = 0, e = SwitchCases.size(); i != e; ++i) { 3621 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>()); 3622 CurDAG = &SDAG; 3623 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo); 3624 3625 // Set the current basic block to the mbb we wish to insert the code into 3626 BB = SwitchCases[i].ThisBB; 3627 SDL.setCurrentBasicBlock(BB); 3628 3629 // Emit the code 3630 SDL.visitSwitchCase(SwitchCases[i]); 3631 SDAG.setRoot(SDL.getRoot()); 3632 CodeGenAndEmitDAG(SDAG); 3633 3634 // Handle any PHI nodes in successors of this chunk, as if we were coming 3635 // from the original BB before switch expansion. Note that PHI nodes can 3636 // occur multiple times in PHINodesToUpdate. We have to be very careful to 3637 // handle them the right number of times. 3638 while ((BB = SwitchCases[i].LHSBB)) { // Handle LHS and RHS. 3639 for (MachineBasicBlock::iterator Phi = BB->begin(); 3640 Phi != BB->end() && Phi->getOpcode() == TargetInstrInfo::PHI; ++Phi){ 3641 // This value for this PHI node is recorded in PHINodesToUpdate, get it. 3642 for (unsigned pn = 0; ; ++pn) { 3643 assert(pn != PHINodesToUpdate.size() && "Didn't find PHI entry!"); 3644 if (PHINodesToUpdate[pn].first == Phi) { 3645 Phi->addRegOperand(PHINodesToUpdate[pn].second, false); 3646 Phi->addMachineBasicBlockOperand(SwitchCases[i].ThisBB); 3647 break; 3648 } 3649 } 3650 } 3651 3652 // Don't process RHS if same block as LHS. 3653 if (BB == SwitchCases[i].RHSBB) 3654 SwitchCases[i].RHSBB = 0; 3655 3656 // If we haven't handled the RHS, do so now. Otherwise, we're done. 3657 SwitchCases[i].LHSBB = SwitchCases[i].RHSBB; 3658 SwitchCases[i].RHSBB = 0; 3659 } 3660 assert(SwitchCases[i].LHSBB == 0 && SwitchCases[i].RHSBB == 0); 3661 } 3662} 3663 3664 3665//===----------------------------------------------------------------------===// 3666/// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each 3667/// target node in the graph. 3668void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) { 3669 if (ViewSchedDAGs) DAG.viewGraph(); 3670 3671 RegisterScheduler::FunctionPassCtor Ctor = RegisterScheduler::getDefault(); 3672 3673 if (!Ctor) { 3674 Ctor = ISHeuristic; 3675 RegisterScheduler::setDefault(Ctor); 3676 } 3677 3678 ScheduleDAG *SL = Ctor(this, &DAG, BB); 3679 BB = SL->Run(); 3680 delete SL; 3681} 3682 3683 3684HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() { 3685 return new HazardRecognizer(); 3686} 3687 3688 3689/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated 3690/// by tblgen. Others should not call it. 3691void SelectionDAGISel:: 3692SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) { 3693 std::vector<SDOperand> InOps; 3694 std::swap(InOps, Ops); 3695 3696 Ops.push_back(InOps[0]); // input chain. 3697 Ops.push_back(InOps[1]); // input asm string. 3698 3699 unsigned i = 2, e = InOps.size(); 3700 if (InOps[e-1].getValueType() == MVT::Flag) 3701 --e; // Don't process a flag operand if it is here. 3702 3703 while (i != e) { 3704 unsigned Flags = cast<ConstantSDNode>(InOps[i])->getValue(); 3705 if ((Flags & 7) != 4 /*MEM*/) { 3706 // Just skip over this operand, copying the operands verbatim. 3707 Ops.insert(Ops.end(), InOps.begin()+i, InOps.begin()+i+(Flags >> 3) + 1); 3708 i += (Flags >> 3) + 1; 3709 } else { 3710 assert((Flags >> 3) == 1 && "Memory operand with multiple values?"); 3711 // Otherwise, this is a memory operand. Ask the target to select it. 
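// The low three bits of each flag word encode the operand kind (4 == MEM) and the remaining bits give the number of operands that follow; the memory case is rebuilt below with whatever operands the target selects.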
3712 std::vector<SDOperand> SelOps; 3713 if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) { 3714 std::cerr << "Could not match memory address. Inline asm failure!\n"; 3715 exit(1); 3716 } 3717 3718 // Add this to the output node. 3719 Ops.push_back(DAG.getConstant(4/*MEM*/ | (SelOps.size() << 3), MVT::i32)); 3720 Ops.insert(Ops.end(), SelOps.begin(), SelOps.end()); 3721 i += 2; 3722 } 3723 } 3724 3725 // Add the flag input back if present. 3726 if (e != InOps.size()) 3727 Ops.push_back(InOps.back()); 3728} 3729