SelectionDAGISel.cpp revision ecb7a77885b174cf4d001a9b48533b3979e7810d
1//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file was developed by the LLVM research group and is distributed under 6// the University of Illinois Open Source License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This implements the SelectionDAGISel class. 11// 12//===----------------------------------------------------------------------===// 13 14#define DEBUG_TYPE "isel" 15#include "llvm/Analysis/AliasAnalysis.h" 16#include "llvm/Analysis/LoopInfo.h" 17#include "llvm/CodeGen/SelectionDAGISel.h" 18#include "llvm/CodeGen/ScheduleDAG.h" 19#include "llvm/CallingConv.h" 20#include "llvm/Constants.h" 21#include "llvm/DerivedTypes.h" 22#include "llvm/Function.h" 23#include "llvm/GlobalVariable.h" 24#include "llvm/InlineAsm.h" 25#include "llvm/Instructions.h" 26#include "llvm/Intrinsics.h" 27#include "llvm/IntrinsicInst.h" 28#include "llvm/CodeGen/MachineModuleInfo.h" 29#include "llvm/CodeGen/MachineFunction.h" 30#include "llvm/CodeGen/MachineFrameInfo.h" 31#include "llvm/CodeGen/MachineJumpTableInfo.h" 32#include "llvm/CodeGen/MachineInstrBuilder.h" 33#include "llvm/CodeGen/SchedulerRegistry.h" 34#include "llvm/CodeGen/SelectionDAG.h" 35#include "llvm/CodeGen/SSARegMap.h" 36#include "llvm/Target/MRegisterInfo.h" 37#include "llvm/Target/TargetAsmInfo.h" 38#include "llvm/Target/TargetData.h" 39#include "llvm/Target/TargetFrameInfo.h" 40#include "llvm/Target/TargetInstrInfo.h" 41#include "llvm/Target/TargetLowering.h" 42#include "llvm/Target/TargetMachine.h" 43#include "llvm/Target/TargetOptions.h" 44#include "llvm/Transforms/Utils/BasicBlockUtils.h" 45#include "llvm/Support/MathExtras.h" 46#include "llvm/Support/Debug.h" 47#include "llvm/Support/Compiler.h" 48#include <algorithm> 49using namespace llvm; 50 51#ifndef NDEBUG 52static cl::opt<bool> 53ViewISelDAGs("view-isel-dags", cl::Hidden, 54 cl::desc("Pop up a window to show isel dags as they are selected")); 55static cl::opt<bool> 56ViewSchedDAGs("view-sched-dags", cl::Hidden, 57 cl::desc("Pop up a window to show sched dags as they are processed")); 58#else 59static const bool ViewISelDAGs = 0, ViewSchedDAGs = 0; 60#endif 61 62//===---------------------------------------------------------------------===// 63/// 64/// RegisterScheduler class - Track the registration of instruction schedulers. 65/// 66//===---------------------------------------------------------------------===// 67MachinePassRegistry RegisterScheduler::Registry; 68 69//===---------------------------------------------------------------------===// 70/// 71/// ISHeuristic command line option for instruction schedulers. 72/// 73//===---------------------------------------------------------------------===// 74namespace { 75 cl::opt<RegisterScheduler::FunctionPassCtor, false, 76 RegisterPassParser<RegisterScheduler> > 77 ISHeuristic("sched", 78 cl::init(&createDefaultScheduler), 79 cl::desc("Instruction schedulers available:")); 80 81 static RegisterScheduler 82 defaultListDAGScheduler("default", " Best scheduler for the target", 83 createDefaultScheduler); 84} // namespace 85 86namespace { 87 /// RegsForValue - This struct represents the physical registers that a 88 /// particular value is assigned and the type information about the value. 89 /// This is needed because values can be promoted into larger registers and 90 /// expanded into multiple smaller registers than the value. 
91 struct VISIBILITY_HIDDEN RegsForValue { 92 /// Regs - This list hold the register (for legal and promoted values) 93 /// or register set (for expanded values) that the value should be assigned 94 /// to. 95 std::vector<unsigned> Regs; 96 97 /// RegVT - The value type of each register. 98 /// 99 MVT::ValueType RegVT; 100 101 /// ValueVT - The value type of the LLVM value, which may be promoted from 102 /// RegVT or made from merging the two expanded parts. 103 MVT::ValueType ValueVT; 104 105 RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {} 106 107 RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt) 108 : RegVT(regvt), ValueVT(valuevt) { 109 Regs.push_back(Reg); 110 } 111 RegsForValue(const std::vector<unsigned> ®s, 112 MVT::ValueType regvt, MVT::ValueType valuevt) 113 : Regs(regs), RegVT(regvt), ValueVT(valuevt) { 114 } 115 116 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from 117 /// this value and returns the result as a ValueVT value. This uses 118 /// Chain/Flag as the input and updates them for the output Chain/Flag. 119 SDOperand getCopyFromRegs(SelectionDAG &DAG, 120 SDOperand &Chain, SDOperand &Flag) const; 121 122 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the 123 /// specified value into the registers specified by this object. This uses 124 /// Chain/Flag as the input and updates them for the output Chain/Flag. 125 void getCopyToRegs(SDOperand Val, SelectionDAG &DAG, 126 SDOperand &Chain, SDOperand &Flag, 127 MVT::ValueType PtrVT) const; 128 129 /// AddInlineAsmOperands - Add this value to the specified inlineasm node 130 /// operand list. This adds the code marker and includes the number of 131 /// values added into it. 132 void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG, 133 std::vector<SDOperand> &Ops) const; 134 }; 135} 136 137namespace llvm { 138 //===--------------------------------------------------------------------===// 139 /// createDefaultScheduler - This creates an instruction scheduler appropriate 140 /// for the target. 141 ScheduleDAG* createDefaultScheduler(SelectionDAGISel *IS, 142 SelectionDAG *DAG, 143 MachineBasicBlock *BB) { 144 TargetLowering &TLI = IS->getTargetLowering(); 145 146 if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency) { 147 return createTDListDAGScheduler(IS, DAG, BB); 148 } else { 149 assert(TLI.getSchedulingPreference() == 150 TargetLowering::SchedulingForRegPressure && "Unknown sched type!"); 151 return createBURRListDAGScheduler(IS, DAG, BB); 152 } 153 } 154 155 156 //===--------------------------------------------------------------------===// 157 /// FunctionLoweringInfo - This contains information that is global to a 158 /// function that is used when lowering a region of the function. 159 class FunctionLoweringInfo { 160 public: 161 TargetLowering &TLI; 162 Function &Fn; 163 MachineFunction &MF; 164 SSARegMap *RegMap; 165 166 FunctionLoweringInfo(TargetLowering &TLI, Function &Fn,MachineFunction &MF); 167 168 /// MBBMap - A mapping from LLVM basic blocks to their machine code entry. 169 std::map<const BasicBlock*, MachineBasicBlock *> MBBMap; 170 171 /// ValueMap - Since we emit code for the function a basic block at a time, 172 /// we must remember which virtual registers hold the values for 173 /// cross-basic-block values. 174 DenseMap<const Value*, unsigned> ValueMap; 175 176 /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in 177 /// the entry block. 
This allows the allocas to be efficiently referenced 178 /// anywhere in the function. 179 std::map<const AllocaInst*, int> StaticAllocaMap; 180 181 unsigned MakeReg(MVT::ValueType VT) { 182 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT)); 183 } 184 185 /// isExportedInst - Return true if the specified value is an instruction 186 /// exported from its block. 187 bool isExportedInst(const Value *V) { 188 return ValueMap.count(V); 189 } 190 191 unsigned CreateRegForValue(const Value *V); 192 193 unsigned InitializeRegForValue(const Value *V) { 194 unsigned &R = ValueMap[V]; 195 assert(R == 0 && "Already initialized this value register!"); 196 return R = CreateRegForValue(V); 197 } 198 }; 199} 200 201/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by 202/// PHI nodes or outside of the basic block that defines it, or used by a 203/// switch instruction, which may expand to multiple basic blocks. 204static bool isUsedOutsideOfDefiningBlock(Instruction *I) { 205 if (isa<PHINode>(I)) return true; 206 BasicBlock *BB = I->getParent(); 207 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI) 208 if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) || 209 // FIXME: Remove switchinst special case. 210 isa<SwitchInst>(*UI)) 211 return true; 212 return false; 213} 214 215/// isOnlyUsedInEntryBlock - If the specified argument is only used in the 216/// entry block, return true. This includes arguments used by switches, since 217/// the switch may expand into multiple basic blocks. 218static bool isOnlyUsedInEntryBlock(Argument *A) { 219 BasicBlock *Entry = A->getParent()->begin(); 220 for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI) 221 if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI)) 222 return false; // Use not in entry block. 223 return true; 224} 225 226FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli, 227 Function &fn, MachineFunction &mf) 228 : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) { 229 230 // Create a vreg for each argument register that is not dead and is used 231 // outside of the entry block for the function. 232 for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end(); 233 AI != E; ++AI) 234 if (!isOnlyUsedInEntryBlock(AI)) 235 InitializeRegForValue(AI); 236 237 // Initialize the mapping of values to registers. This is only set up for 238 // instruction values that are used outside of the block that defines 239 // them. 240 Function::iterator BB = Fn.begin(), EB = Fn.end(); 241 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) 242 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) 243 if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) { 244 const Type *Ty = AI->getAllocatedType(); 245 uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty); 246 unsigned Align = 247 std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), 248 AI->getAlignment()); 249 250 TySize *= CUI->getZExtValue(); // Get total allocated size. 251 if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects. 
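        // Remember the frame index assigned to this fixed-size entry-block
        // alloca; getValue() can then lower references to it directly to a
        // FrameIndex node instead of going through a virtual register.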
252 StaticAllocaMap[AI] = 253 MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align); 254 } 255 256 for (; BB != EB; ++BB) 257 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) 258 if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I)) 259 if (!isa<AllocaInst>(I) || 260 !StaticAllocaMap.count(cast<AllocaInst>(I))) 261 InitializeRegForValue(I); 262 263 // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This 264 // also creates the initial PHI MachineInstrs, though none of the input 265 // operands are populated. 266 for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) { 267 MachineBasicBlock *MBB = new MachineBasicBlock(BB); 268 MBBMap[BB] = MBB; 269 MF.getBasicBlockList().push_back(MBB); 270 271 // Create Machine PHI nodes for LLVM PHI nodes, lowering them as 272 // appropriate. 273 PHINode *PN; 274 for (BasicBlock::iterator I = BB->begin();(PN = dyn_cast<PHINode>(I)); ++I){ 275 if (PN->use_empty()) continue; 276 277 MVT::ValueType VT = TLI.getValueType(PN->getType()); 278 unsigned NumElements; 279 if (VT != MVT::Vector) 280 NumElements = TLI.getNumElements(VT); 281 else { 282 MVT::ValueType VT1,VT2; 283 NumElements = 284 TLI.getVectorTypeBreakdown(cast<VectorType>(PN->getType()), 285 VT1, VT2); 286 } 287 unsigned PHIReg = ValueMap[PN]; 288 assert(PHIReg && "PHI node does not have an assigned virtual register!"); 289 const TargetInstrInfo *TII = TLI.getTargetMachine().getInstrInfo(); 290 for (unsigned i = 0; i != NumElements; ++i) 291 BuildMI(MBB, TII->get(TargetInstrInfo::PHI), PHIReg+i); 292 } 293 } 294} 295 296/// CreateRegForValue - Allocate the appropriate number of virtual registers of 297/// the correctly promoted or expanded types. Assign these registers 298/// consecutive vreg numbers and return the first assigned number. 299unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) { 300 MVT::ValueType VT = TLI.getValueType(V->getType()); 301 302 // The number of multiples of registers that we need, to, e.g., split up 303 // a <2 x int64> -> 4 x i32 registers. 304 unsigned NumVectorRegs = 1; 305 306 // If this is a vector type, figure out what type it will decompose into 307 // and how many of the elements it will use. 308 if (VT == MVT::Vector) { 309 const VectorType *PTy = cast<VectorType>(V->getType()); 310 unsigned NumElts = PTy->getNumElements(); 311 MVT::ValueType EltTy = TLI.getValueType(PTy->getElementType()); 312 313 // Divide the input until we get to a supported size. This will always 314 // end with a scalar if the target doesn't support vectors. 315 while (NumElts > 1 && !TLI.isTypeLegal(getVectorType(EltTy, NumElts))) { 316 NumElts >>= 1; 317 NumVectorRegs <<= 1; 318 } 319 if (NumElts == 1) 320 VT = EltTy; 321 else 322 VT = getVectorType(EltTy, NumElts); 323 } 324 325 // The common case is that we will only create one register for this 326 // value. If we have that case, create and return the virtual register. 327 unsigned NV = TLI.getNumElements(VT); 328 if (NV == 1) { 329 // If we are promoting this value, pick the next largest supported type. 330 MVT::ValueType PromotedType = TLI.getTypeToTransformTo(VT); 331 unsigned Reg = MakeReg(PromotedType); 332 // If this is a vector of supported or promoted types (e.g. 4 x i16), 333 // create all of the registers. 334 for (unsigned i = 1; i != NumVectorRegs; ++i) 335 MakeReg(PromotedType); 336 return Reg; 337 } 338 339 // If this value is represented with multiple target registers, make sure 340 // to create enough consecutive registers of the right (smaller) type. 
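  // For example, an i64 value on a target with only 32-bit registers is
  // expanded here into two consecutive i32 virtual registers.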
341 VT = TLI.getTypeToExpandTo(VT); 342 unsigned R = MakeReg(VT); 343 for (unsigned i = 1; i != NV*NumVectorRegs; ++i) 344 MakeReg(VT); 345 return R; 346} 347 348//===----------------------------------------------------------------------===// 349/// SelectionDAGLowering - This is the common target-independent lowering 350/// implementation that is parameterized by a TargetLowering object. 351/// Also, targets can overload any lowering method. 352/// 353namespace llvm { 354class SelectionDAGLowering { 355 MachineBasicBlock *CurMBB; 356 357 DenseMap<const Value*, SDOperand> NodeMap; 358 359 /// PendingLoads - Loads are not emitted to the program immediately. We bunch 360 /// them up and then emit token factor nodes when possible. This allows us to 361 /// get simple disambiguation between loads without worrying about alias 362 /// analysis. 363 std::vector<SDOperand> PendingLoads; 364 365 /// Case - A pair of values to record the Value for a switch case, and the 366 /// case's target basic block. 367 typedef std::pair<Constant*, MachineBasicBlock*> Case; 368 typedef std::vector<Case>::iterator CaseItr; 369 typedef std::pair<CaseItr, CaseItr> CaseRange; 370 371 /// CaseRec - A struct with ctor used in lowering switches to a binary tree 372 /// of conditional branches. 373 struct CaseRec { 374 CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) : 375 CaseBB(bb), LT(lt), GE(ge), Range(r) {} 376 377 /// CaseBB - The MBB in which to emit the compare and branch 378 MachineBasicBlock *CaseBB; 379 /// LT, GE - If nonzero, we know the current case value must be less-than or 380 /// greater-than-or-equal-to these Constants. 381 Constant *LT; 382 Constant *GE; 383 /// Range - A pair of iterators representing the range of case values to be 384 /// processed at this point in the binary search tree. 385 CaseRange Range; 386 }; 387 388 /// The comparison function for sorting Case values. 389 struct CaseCmp { 390 bool operator () (const Case& C1, const Case& C2) { 391 assert(isa<ConstantInt>(C1.first) && isa<ConstantInt>(C2.first)); 392 return cast<const ConstantInt>(C1.first)->getSExtValue() < 393 cast<const ConstantInt>(C2.first)->getSExtValue(); 394 } 395 }; 396 397public: 398 // TLI - This is information that describes the available target features we 399 // need for lowering. This indicates when operations are unavailable, 400 // implemented with a libcall, etc. 401 TargetLowering &TLI; 402 SelectionDAG &DAG; 403 const TargetData *TD; 404 405 /// SwitchCases - Vector of CaseBlock structures used to communicate 406 /// SwitchInst code generation information. 407 std::vector<SelectionDAGISel::CaseBlock> SwitchCases; 408 SelectionDAGISel::JumpTable JT; 409 410 /// FuncInfo - Information about the function as a whole. 411 /// 412 FunctionLoweringInfo &FuncInfo; 413 414 SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli, 415 FunctionLoweringInfo &funcinfo) 416 : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()), 417 JT(0,0,0,0), FuncInfo(funcinfo) { 418 } 419 420 /// getRoot - Return the current virtual root of the Selection DAG. 421 /// 422 SDOperand getRoot() { 423 if (PendingLoads.empty()) 424 return DAG.getRoot(); 425 426 if (PendingLoads.size() == 1) { 427 SDOperand Root = PendingLoads[0]; 428 DAG.setRoot(Root); 429 PendingLoads.clear(); 430 return Root; 431 } 432 433 // Otherwise, we have to make a token factor node. 
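    // A TokenFactor takes several chains and produces a single chain, so every
    // pending load is ordered before whatever is chained after the new root,
    // while the loads stay unordered with respect to one another.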
434 SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other, 435 &PendingLoads[0], PendingLoads.size()); 436 PendingLoads.clear(); 437 DAG.setRoot(Root); 438 return Root; 439 } 440 441 SDOperand CopyValueToVirtualRegister(Value *V, unsigned Reg); 442 443 void visit(Instruction &I) { visit(I.getOpcode(), I); } 444 445 void visit(unsigned Opcode, User &I) { 446 // Note: this doesn't use InstVisitor, because it has to work with 447 // ConstantExpr's in addition to instructions. 448 switch (Opcode) { 449 default: assert(0 && "Unknown instruction type encountered!"); 450 abort(); 451 // Build the switch statement using the Instruction.def file. 452#define HANDLE_INST(NUM, OPCODE, CLASS) \ 453 case Instruction::OPCODE:return visit##OPCODE((CLASS&)I); 454#include "llvm/Instruction.def" 455 } 456 } 457 458 void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; } 459 460 SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr, 461 const Value *SV, SDOperand Root, 462 bool isVolatile); 463 464 SDOperand getIntPtrConstant(uint64_t Val) { 465 return DAG.getConstant(Val, TLI.getPointerTy()); 466 } 467 468 SDOperand getValue(const Value *V); 469 470 void setValue(const Value *V, SDOperand NewN) { 471 SDOperand &N = NodeMap[V]; 472 assert(N.Val == 0 && "Already set a value for this node!"); 473 N = NewN; 474 } 475 476 RegsForValue GetRegistersForValue(const std::string &ConstrCode, 477 MVT::ValueType VT, 478 bool OutReg, bool InReg, 479 std::set<unsigned> &OutputRegs, 480 std::set<unsigned> &InputRegs); 481 482 void FindMergedConditions(Value *Cond, MachineBasicBlock *TBB, 483 MachineBasicBlock *FBB, MachineBasicBlock *CurBB, 484 unsigned Opc); 485 bool isExportableFromCurrentBlock(Value *V, const BasicBlock *FromBB); 486 void ExportFromCurrentBlock(Value *V); 487 void LowerCallTo(Instruction &I, 488 const Type *CalledValueTy, unsigned CallingConv, 489 bool IsTailCall, SDOperand Callee, unsigned OpIdx); 490 491 // Terminator instructions. 492 void visitRet(ReturnInst &I); 493 void visitBr(BranchInst &I); 494 void visitSwitch(SwitchInst &I); 495 void visitUnreachable(UnreachableInst &I) { /* noop */ } 496 497 // Helper for visitSwitch 498 void visitSwitchCase(SelectionDAGISel::CaseBlock &CB); 499 void visitJumpTable(SelectionDAGISel::JumpTable &JT); 500 501 // These all get lowered before this pass. 
502 void visitInvoke(InvokeInst &I); 503 void visitInvoke(InvokeInst &I, bool AsTerminator); 504 void visitUnwind(UnwindInst &I); 505 506 void visitScalarBinary(User &I, unsigned OpCode); 507 void visitVectorBinary(User &I, unsigned OpCode); 508 void visitEitherBinary(User &I, unsigned ScalarOp, unsigned VectorOp); 509 void visitShift(User &I, unsigned Opcode); 510 void visitAdd(User &I) { 511 if (isa<VectorType>(I.getType())) 512 visitVectorBinary(I, ISD::VADD); 513 else if (I.getType()->isFloatingPoint()) 514 visitScalarBinary(I, ISD::FADD); 515 else 516 visitScalarBinary(I, ISD::ADD); 517 } 518 void visitSub(User &I); 519 void visitMul(User &I) { 520 if (isa<VectorType>(I.getType())) 521 visitVectorBinary(I, ISD::VMUL); 522 else if (I.getType()->isFloatingPoint()) 523 visitScalarBinary(I, ISD::FMUL); 524 else 525 visitScalarBinary(I, ISD::MUL); 526 } 527 void visitURem(User &I) { visitScalarBinary(I, ISD::UREM); } 528 void visitSRem(User &I) { visitScalarBinary(I, ISD::SREM); } 529 void visitFRem(User &I) { visitScalarBinary(I, ISD::FREM); } 530 void visitUDiv(User &I) { visitEitherBinary(I, ISD::UDIV, ISD::VUDIV); } 531 void visitSDiv(User &I) { visitEitherBinary(I, ISD::SDIV, ISD::VSDIV); } 532 void visitFDiv(User &I) { visitEitherBinary(I, ISD::FDIV, ISD::VSDIV); } 533 void visitAnd (User &I) { visitEitherBinary(I, ISD::AND, ISD::VAND ); } 534 void visitOr (User &I) { visitEitherBinary(I, ISD::OR, ISD::VOR ); } 535 void visitXor (User &I) { visitEitherBinary(I, ISD::XOR, ISD::VXOR ); } 536 void visitShl (User &I) { visitShift(I, ISD::SHL); } 537 void visitLShr(User &I) { visitShift(I, ISD::SRL); } 538 void visitAShr(User &I) { visitShift(I, ISD::SRA); } 539 void visitICmp(User &I); 540 void visitFCmp(User &I); 541 // Visit the conversion instructions 542 void visitTrunc(User &I); 543 void visitZExt(User &I); 544 void visitSExt(User &I); 545 void visitFPTrunc(User &I); 546 void visitFPExt(User &I); 547 void visitFPToUI(User &I); 548 void visitFPToSI(User &I); 549 void visitUIToFP(User &I); 550 void visitSIToFP(User &I); 551 void visitPtrToInt(User &I); 552 void visitIntToPtr(User &I); 553 void visitBitCast(User &I); 554 555 void visitExtractElement(User &I); 556 void visitInsertElement(User &I); 557 void visitShuffleVector(User &I); 558 559 void visitGetElementPtr(User &I); 560 void visitSelect(User &I); 561 562 void visitMalloc(MallocInst &I); 563 void visitFree(FreeInst &I); 564 void visitAlloca(AllocaInst &I); 565 void visitLoad(LoadInst &I); 566 void visitStore(StoreInst &I); 567 void visitPHI(PHINode &I) { } // PHI nodes are handled specially. 
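  // Machine PHI nodes for these were already created in the
  // FunctionLoweringInfo constructor; their incoming operands are filled in
  // later, once the predecessor blocks have been selected.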
568 void visitCall(CallInst &I); 569 void visitInlineAsm(CallInst &I); 570 const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic); 571 void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic); 572 573 void visitVAStart(CallInst &I); 574 void visitVAArg(VAArgInst &I); 575 void visitVAEnd(CallInst &I); 576 void visitVACopy(CallInst &I); 577 578 void visitMemIntrinsic(CallInst &I, unsigned Op); 579 580 void visitUserOp1(Instruction &I) { 581 assert(0 && "UserOp1 should not exist at instruction selection time!"); 582 abort(); 583 } 584 void visitUserOp2(Instruction &I) { 585 assert(0 && "UserOp2 should not exist at instruction selection time!"); 586 abort(); 587 } 588}; 589} // end namespace llvm 590 591SDOperand SelectionDAGLowering::getValue(const Value *V) { 592 SDOperand &N = NodeMap[V]; 593 if (N.Val) return N; 594 595 const Type *VTy = V->getType(); 596 MVT::ValueType VT = TLI.getValueType(VTy); 597 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) { 598 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { 599 visit(CE->getOpcode(), *CE); 600 SDOperand N1 = NodeMap[V]; 601 assert(N1.Val && "visit didn't populate the ValueMap!"); 602 return N1; 603 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) { 604 return N = DAG.getGlobalAddress(GV, VT); 605 } else if (isa<ConstantPointerNull>(C)) { 606 return N = DAG.getConstant(0, TLI.getPointerTy()); 607 } else if (isa<UndefValue>(C)) { 608 if (!isa<VectorType>(VTy)) 609 return N = DAG.getNode(ISD::UNDEF, VT); 610 611 // Create a VBUILD_VECTOR of undef nodes. 612 const VectorType *PTy = cast<VectorType>(VTy); 613 unsigned NumElements = PTy->getNumElements(); 614 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType()); 615 616 SmallVector<SDOperand, 8> Ops; 617 Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT)); 618 619 // Create a VConstant node with generic Vector type. 620 Ops.push_back(DAG.getConstant(NumElements, MVT::i32)); 621 Ops.push_back(DAG.getValueType(PVT)); 622 return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, 623 &Ops[0], Ops.size()); 624 } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) { 625 return N = DAG.getConstantFP(CFP->getValue(), VT); 626 } else if (const VectorType *PTy = dyn_cast<VectorType>(VTy)) { 627 unsigned NumElements = PTy->getNumElements(); 628 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType()); 629 630 // Now that we know the number and type of the elements, push a 631 // Constant or ConstantFP node onto the ops list for each element of 632 // the packed constant. 633 SmallVector<SDOperand, 8> Ops; 634 if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) { 635 for (unsigned i = 0; i != NumElements; ++i) 636 Ops.push_back(getValue(CP->getOperand(i))); 637 } else { 638 assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!"); 639 SDOperand Op; 640 if (MVT::isFloatingPoint(PVT)) 641 Op = DAG.getConstantFP(0, PVT); 642 else 643 Op = DAG.getConstant(0, PVT); 644 Ops.assign(NumElements, Op); 645 } 646 647 // Create a VBUILD_VECTOR node with generic Vector type. 648 Ops.push_back(DAG.getConstant(NumElements, MVT::i32)); 649 Ops.push_back(DAG.getValueType(PVT)); 650 return NodeMap[V] = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, &Ops[0], 651 Ops.size()); 652 } else { 653 // Canonicalize all constant ints to be unsigned. 
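      // For example, an i8 constant of -1 is recorded here as the
      // zero-extended value 255.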
654 return N = DAG.getConstant(cast<ConstantInt>(C)->getZExtValue(),VT); 655 } 656 } 657 658 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) { 659 std::map<const AllocaInst*, int>::iterator SI = 660 FuncInfo.StaticAllocaMap.find(AI); 661 if (SI != FuncInfo.StaticAllocaMap.end()) 662 return DAG.getFrameIndex(SI->second, TLI.getPointerTy()); 663 } 664 665 unsigned InReg = FuncInfo.ValueMap[V]; 666 assert(InReg && "Value not in map!"); 667 668 // If this type is not legal, make it so now. 669 if (VT != MVT::Vector) { 670 if (TLI.getTypeAction(VT) == TargetLowering::Expand) { 671 // Source must be expanded. This input value is actually coming from the 672 // register pair InReg and InReg+1. 673 MVT::ValueType DestVT = TLI.getTypeToExpandTo(VT); 674 unsigned NumVals = TLI.getNumElements(VT); 675 N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT); 676 if (NumVals == 1) 677 N = DAG.getNode(ISD::BIT_CONVERT, VT, N); 678 else { 679 assert(NumVals == 2 && "1 to 4 (and more) expansion not implemented!"); 680 N = DAG.getNode(ISD::BUILD_PAIR, VT, N, 681 DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT)); 682 } 683 } else { 684 MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT); 685 N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT); 686 if (TLI.getTypeAction(VT) == TargetLowering::Promote) // Promotion case 687 N = MVT::isFloatingPoint(VT) 688 ? DAG.getNode(ISD::FP_ROUND, VT, N) 689 : DAG.getNode(ISD::TRUNCATE, VT, N); 690 } 691 } else { 692 // Otherwise, if this is a vector, make it available as a generic vector 693 // here. 694 MVT::ValueType PTyElementVT, PTyLegalElementVT; 695 const VectorType *PTy = cast<VectorType>(VTy); 696 unsigned NE = TLI.getVectorTypeBreakdown(PTy, PTyElementVT, 697 PTyLegalElementVT); 698 699 // Build a VBUILD_VECTOR with the input registers. 700 SmallVector<SDOperand, 8> Ops; 701 if (PTyElementVT == PTyLegalElementVT) { 702 // If the value types are legal, just VBUILD the CopyFromReg nodes. 703 for (unsigned i = 0; i != NE; ++i) 704 Ops.push_back(DAG.getCopyFromReg(DAG.getEntryNode(), InReg++, 705 PTyElementVT)); 706 } else if (PTyElementVT < PTyLegalElementVT) { 707 // If the register was promoted, use TRUNCATE of FP_ROUND as appropriate. 708 for (unsigned i = 0; i != NE; ++i) { 709 SDOperand Op = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++, 710 PTyElementVT); 711 if (MVT::isFloatingPoint(PTyElementVT)) 712 Op = DAG.getNode(ISD::FP_ROUND, PTyElementVT, Op); 713 else 714 Op = DAG.getNode(ISD::TRUNCATE, PTyElementVT, Op); 715 Ops.push_back(Op); 716 } 717 } else { 718 // If the register was expanded, use BUILD_PAIR. 719 assert((NE & 1) == 0 && "Must expand into a multiple of 2 elements!"); 720 for (unsigned i = 0; i != NE/2; ++i) { 721 SDOperand Op0 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++, 722 PTyElementVT); 723 SDOperand Op1 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++, 724 PTyElementVT); 725 Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Op0, Op1)); 726 } 727 } 728 729 Ops.push_back(DAG.getConstant(NE, MVT::i32)); 730 Ops.push_back(DAG.getValueType(PTyLegalElementVT)); 731 N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, &Ops[0], Ops.size()); 732 733 // Finally, use a VBIT_CONVERT to make this available as the appropriate 734 // vector type. 
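    // Nodes with the generic MVT::Vector type carry their element count and
    // element value type as trailing operands, just like the VBUILD_VECTOR
    // nodes built above.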
735 N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N, 736 DAG.getConstant(PTy->getNumElements(), 737 MVT::i32), 738 DAG.getValueType(TLI.getValueType(PTy->getElementType()))); 739 } 740 741 return N; 742} 743 744 745void SelectionDAGLowering::visitRet(ReturnInst &I) { 746 if (I.getNumOperands() == 0) { 747 DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot())); 748 return; 749 } 750 SmallVector<SDOperand, 8> NewValues; 751 NewValues.push_back(getRoot()); 752 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) { 753 SDOperand RetOp = getValue(I.getOperand(i)); 754 755 // If this is an integer return value, we need to promote it ourselves to 756 // the full width of a register, since LegalizeOp will use ANY_EXTEND rather 757 // than sign/zero. 758 // FIXME: C calling convention requires the return type to be promoted to 759 // at least 32-bit. But this is not necessary for non-C calling conventions. 760 if (MVT::isInteger(RetOp.getValueType()) && 761 RetOp.getValueType() < MVT::i64) { 762 MVT::ValueType TmpVT; 763 if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote) 764 TmpVT = TLI.getTypeToTransformTo(MVT::i32); 765 else 766 TmpVT = MVT::i32; 767 const FunctionType *FTy = I.getParent()->getParent()->getFunctionType(); 768 ISD::NodeType ExtendKind = ISD::ANY_EXTEND; 769 if (FTy->paramHasAttr(0, FunctionType::SExtAttribute)) 770 ExtendKind = ISD::SIGN_EXTEND; 771 if (FTy->paramHasAttr(0, FunctionType::ZExtAttribute)) 772 ExtendKind = ISD::ZERO_EXTEND; 773 RetOp = DAG.getNode(ExtendKind, TmpVT, RetOp); 774 } 775 NewValues.push_back(RetOp); 776 NewValues.push_back(DAG.getConstant(false, MVT::i32)); 777 } 778 DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, 779 &NewValues[0], NewValues.size())); 780} 781 782/// ExportFromCurrentBlock - If this condition isn't known to be exported from 783/// the current basic block, add it to ValueMap now so that we'll get a 784/// CopyTo/FromReg. 785void SelectionDAGLowering::ExportFromCurrentBlock(Value *V) { 786 // No need to export constants. 787 if (!isa<Instruction>(V) && !isa<Argument>(V)) return; 788 789 // Already exported? 790 if (FuncInfo.isExportedInst(V)) return; 791 792 unsigned Reg = FuncInfo.InitializeRegForValue(V); 793 PendingLoads.push_back(CopyValueToVirtualRegister(V, Reg)); 794} 795 796bool SelectionDAGLowering::isExportableFromCurrentBlock(Value *V, 797 const BasicBlock *FromBB) { 798 // The operands of the setcc have to be in this block. We don't know 799 // how to export them from some other block. 800 if (Instruction *VI = dyn_cast<Instruction>(V)) { 801 // Can export from current BB. 802 if (VI->getParent() == FromBB) 803 return true; 804 805 // Is already exported, noop. 806 return FuncInfo.isExportedInst(V); 807 } 808 809 // If this is an argument, we can export it if the BB is the entry block or 810 // if it is already exported. 811 if (isa<Argument>(V)) { 812 if (FromBB == &FromBB->getParent()->getEntryBlock()) 813 return true; 814 815 // Otherwise, can only export this if it is already exported. 816 return FuncInfo.isExportedInst(V); 817 } 818 819 // Otherwise, constants can always be exported. 
820 return true; 821} 822 823static bool InBlock(const Value *V, const BasicBlock *BB) { 824 if (const Instruction *I = dyn_cast<Instruction>(V)) 825 return I->getParent() == BB; 826 return true; 827} 828 829/// FindMergedConditions - If Cond is an expression like 830void SelectionDAGLowering::FindMergedConditions(Value *Cond, 831 MachineBasicBlock *TBB, 832 MachineBasicBlock *FBB, 833 MachineBasicBlock *CurBB, 834 unsigned Opc) { 835 // If this node is not part of the or/and tree, emit it as a branch. 836 Instruction *BOp = dyn_cast<Instruction>(Cond); 837 838 if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) || 839 (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() || 840 BOp->getParent() != CurBB->getBasicBlock() || 841 !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) || 842 !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) { 843 const BasicBlock *BB = CurBB->getBasicBlock(); 844 845 // If the leaf of the tree is a comparison, merge the condition into 846 // the caseblock. 847 if ((isa<ICmpInst>(Cond) || isa<FCmpInst>(Cond)) && 848 // The operands of the cmp have to be in this block. We don't know 849 // how to export them from some other block. If this is the first block 850 // of the sequence, no exporting is needed. 851 (CurBB == CurMBB || 852 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) && 853 isExportableFromCurrentBlock(BOp->getOperand(1), BB)))) { 854 BOp = cast<Instruction>(Cond); 855 ISD::CondCode Condition; 856 if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) { 857 switch (IC->getPredicate()) { 858 default: assert(0 && "Unknown icmp predicate opcode!"); 859 case ICmpInst::ICMP_EQ: Condition = ISD::SETEQ; break; 860 case ICmpInst::ICMP_NE: Condition = ISD::SETNE; break; 861 case ICmpInst::ICMP_SLE: Condition = ISD::SETLE; break; 862 case ICmpInst::ICMP_ULE: Condition = ISD::SETULE; break; 863 case ICmpInst::ICMP_SGE: Condition = ISD::SETGE; break; 864 case ICmpInst::ICMP_UGE: Condition = ISD::SETUGE; break; 865 case ICmpInst::ICMP_SLT: Condition = ISD::SETLT; break; 866 case ICmpInst::ICMP_ULT: Condition = ISD::SETULT; break; 867 case ICmpInst::ICMP_SGT: Condition = ISD::SETGT; break; 868 case ICmpInst::ICMP_UGT: Condition = ISD::SETUGT; break; 869 } 870 } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) { 871 ISD::CondCode FPC, FOC; 872 switch (FC->getPredicate()) { 873 default: assert(0 && "Unknown fcmp predicate opcode!"); 874 case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break; 875 case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break; 876 case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break; 877 case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break; 878 case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break; 879 case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break; 880 case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break; 881 case FCmpInst::FCMP_ORD: FOC = ISD::SETEQ; FPC = ISD::SETO; break; 882 case FCmpInst::FCMP_UNO: FOC = ISD::SETNE; FPC = ISD::SETUO; break; 883 case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break; 884 case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break; 885 case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break; 886 case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break; 887 case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break; 888 case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break; 889 case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; 
break; 890 } 891 if (FiniteOnlyFPMath()) 892 Condition = FOC; 893 else 894 Condition = FPC; 895 } else { 896 Condition = ISD::SETEQ; // silence warning. 897 assert(0 && "Unknown compare instruction"); 898 } 899 900 SelectionDAGISel::CaseBlock CB(Condition, BOp->getOperand(0), 901 BOp->getOperand(1), TBB, FBB, CurBB); 902 SwitchCases.push_back(CB); 903 return; 904 } 905 906 // Create a CaseBlock record representing this branch. 907 SelectionDAGISel::CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(), 908 TBB, FBB, CurBB); 909 SwitchCases.push_back(CB); 910 return; 911 } 912 913 914 // Create TmpBB after CurBB. 915 MachineFunction::iterator BBI = CurBB; 916 MachineBasicBlock *TmpBB = new MachineBasicBlock(CurBB->getBasicBlock()); 917 CurBB->getParent()->getBasicBlockList().insert(++BBI, TmpBB); 918 919 if (Opc == Instruction::Or) { 920 // Codegen X | Y as: 921 // jmp_if_X TBB 922 // jmp TmpBB 923 // TmpBB: 924 // jmp_if_Y TBB 925 // jmp FBB 926 // 927 928 // Emit the LHS condition. 929 FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc); 930 931 // Emit the RHS condition into TmpBB. 932 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc); 933 } else { 934 assert(Opc == Instruction::And && "Unknown merge op!"); 935 // Codegen X & Y as: 936 // jmp_if_X TmpBB 937 // jmp FBB 938 // TmpBB: 939 // jmp_if_Y TBB 940 // jmp FBB 941 // 942 // This requires creation of TmpBB after CurBB. 943 944 // Emit the LHS condition. 945 FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc); 946 947 // Emit the RHS condition into TmpBB. 948 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc); 949 } 950} 951 952/// If the set of cases should be emitted as a series of branches, return true. 953/// If we should emit this as a bunch of and/or'd together conditions, return 954/// false. 955static bool 956ShouldEmitAsBranches(const std::vector<SelectionDAGISel::CaseBlock> &Cases) { 957 if (Cases.size() != 2) return true; 958 959 // If this is two comparisons of the same values or'd or and'd together, they 960 // will get folded into a single comparison, so don't emit two blocks. 961 if ((Cases[0].CmpLHS == Cases[1].CmpLHS && 962 Cases[0].CmpRHS == Cases[1].CmpRHS) || 963 (Cases[0].CmpRHS == Cases[1].CmpLHS && 964 Cases[0].CmpLHS == Cases[1].CmpRHS)) { 965 return false; 966 } 967 968 return true; 969} 970 971void SelectionDAGLowering::visitBr(BranchInst &I) { 972 // Update machine-CFG edges. 973 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)]; 974 975 // Figure out which block is immediately after the current one. 976 MachineBasicBlock *NextBlock = 0; 977 MachineFunction::iterator BBI = CurMBB; 978 if (++BBI != CurMBB->getParent()->end()) 979 NextBlock = BBI; 980 981 if (I.isUnconditional()) { 982 // If this is not a fall-through branch, emit the branch. 983 if (Succ0MBB != NextBlock) 984 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(), 985 DAG.getBasicBlock(Succ0MBB))); 986 987 // Update machine-CFG edges. 988 CurMBB->addSuccessor(Succ0MBB); 989 990 return; 991 } 992 993 // If this condition is one of the special cases we handle, do special stuff 994 // now. 995 Value *CondVal = I.getCondition(); 996 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)]; 997 998 // If this is a series of conditions that are or'd or and'd together, emit 999 // this as a sequence of branches instead of setcc's with and/or operations. 
1000 // For example, instead of something like: 1001 // cmp A, B 1002 // C = seteq 1003 // cmp D, E 1004 // F = setle 1005 // or C, F 1006 // jnz foo 1007 // Emit: 1008 // cmp A, B 1009 // je foo 1010 // cmp D, E 1011 // jle foo 1012 // 1013 if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) { 1014 if (BOp->hasOneUse() && 1015 (BOp->getOpcode() == Instruction::And || 1016 BOp->getOpcode() == Instruction::Or)) { 1017 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode()); 1018 // If the compares in later blocks need to use values not currently 1019 // exported from this block, export them now. This block should always 1020 // be the first entry. 1021 assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!"); 1022 1023 // Allow some cases to be rejected. 1024 if (ShouldEmitAsBranches(SwitchCases)) { 1025 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) { 1026 ExportFromCurrentBlock(SwitchCases[i].CmpLHS); 1027 ExportFromCurrentBlock(SwitchCases[i].CmpRHS); 1028 } 1029 1030 // Emit the branch for this block. 1031 visitSwitchCase(SwitchCases[0]); 1032 SwitchCases.erase(SwitchCases.begin()); 1033 return; 1034 } 1035 1036 // Okay, we decided not to do this, remove any inserted MBB's and clear 1037 // SwitchCases. 1038 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) 1039 CurMBB->getParent()->getBasicBlockList().erase(SwitchCases[i].ThisBB); 1040 1041 SwitchCases.clear(); 1042 } 1043 } 1044 1045 // Create a CaseBlock record representing this branch. 1046 SelectionDAGISel::CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(), 1047 Succ0MBB, Succ1MBB, CurMBB); 1048 // Use visitSwitchCase to actually insert the fast branch sequence for this 1049 // cond branch. 1050 visitSwitchCase(CB); 1051} 1052 1053/// visitSwitchCase - Emits the necessary code to represent a single node in 1054/// the binary search tree resulting from lowering a switch instruction. 1055void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) { 1056 SDOperand Cond; 1057 SDOperand CondLHS = getValue(CB.CmpLHS); 1058 1059 // Build the setcc now, fold "(X == true)" to X and "(X == false)" to !X to 1060 // handle common cases produced by branch lowering. 1061 if (CB.CmpRHS == ConstantInt::getTrue() && CB.CC == ISD::SETEQ) 1062 Cond = CondLHS; 1063 else if (CB.CmpRHS == ConstantInt::getFalse() && CB.CC == ISD::SETEQ) { 1064 SDOperand True = DAG.getConstant(1, CondLHS.getValueType()); 1065 Cond = DAG.getNode(ISD::XOR, CondLHS.getValueType(), CondLHS, True); 1066 } else 1067 Cond = DAG.getSetCC(MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC); 1068 1069 // Set NextBlock to be the MBB immediately after the current one, if any. 1070 // This is used to avoid emitting unnecessary branches to the next block. 1071 MachineBasicBlock *NextBlock = 0; 1072 MachineFunction::iterator BBI = CurMBB; 1073 if (++BBI != CurMBB->getParent()->end()) 1074 NextBlock = BBI; 1075 1076 // If the lhs block is the next block, invert the condition so that we can 1077 // fall through to the lhs instead of the rhs block. 
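  // The swap plus the XOR below compute the inverted condition, so the
  // conditional branch targets the old false block and the old true block is
  // reached by falling through, avoiding an extra unconditional branch.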
1078 if (CB.TrueBB == NextBlock) { 1079 std::swap(CB.TrueBB, CB.FalseBB); 1080 SDOperand True = DAG.getConstant(1, Cond.getValueType()); 1081 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True); 1082 } 1083 SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond, 1084 DAG.getBasicBlock(CB.TrueBB)); 1085 if (CB.FalseBB == NextBlock) 1086 DAG.setRoot(BrCond); 1087 else 1088 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond, 1089 DAG.getBasicBlock(CB.FalseBB))); 1090 // Update successor info 1091 CurMBB->addSuccessor(CB.TrueBB); 1092 CurMBB->addSuccessor(CB.FalseBB); 1093} 1094 1095void SelectionDAGLowering::visitJumpTable(SelectionDAGISel::JumpTable &JT) { 1096 // Emit the code for the jump table 1097 MVT::ValueType PTy = TLI.getPointerTy(); 1098 SDOperand Index = DAG.getCopyFromReg(getRoot(), JT.Reg, PTy); 1099 SDOperand Table = DAG.getJumpTable(JT.JTI, PTy); 1100 DAG.setRoot(DAG.getNode(ISD::BR_JT, MVT::Other, Index.getValue(1), 1101 Table, Index)); 1102 return; 1103} 1104 1105void SelectionDAGLowering::visitInvoke(InvokeInst &I) { 1106 assert(0 && "Should never be visited directly"); 1107} 1108void SelectionDAGLowering::visitInvoke(InvokeInst &I, bool AsTerminator) { 1109 // Retrieve successors. 1110 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)]; 1111 MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)]; 1112 1113 if (!AsTerminator) { 1114 // Mark landing pad so that it doesn't get deleted in branch folding. 1115 LandingPad->setIsLandingPad(); 1116 1117 // Insert a label before the invoke call to mark the try range. 1118 // This can be used to detect deletion of the invoke via the 1119 // MachineModuleInfo. 1120 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 1121 unsigned BeginLabel = MMI->NextLabelID(); 1122 DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other, getRoot(), 1123 DAG.getConstant(BeginLabel, MVT::i32))); 1124 1125 LowerCallTo(I, I.getCalledValue()->getType(), 1126 I.getCallingConv(), 1127 false, 1128 getValue(I.getOperand(0)), 1129 3); 1130 1131 // Insert a label before the invoke call to mark the try range. 1132 // This can be used to detect deletion of the invoke via the 1133 // MachineModuleInfo. 1134 unsigned EndLabel = MMI->NextLabelID(); 1135 DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other, getRoot(), 1136 DAG.getConstant(EndLabel, MVT::i32))); 1137 1138 // Inform MachineModuleInfo of range. 1139 MMI->addInvoke(LandingPad, BeginLabel, EndLabel); 1140 1141 // Update successor info 1142 CurMBB->addSuccessor(Return); 1143 CurMBB->addSuccessor(LandingPad); 1144 } else { 1145 // Drop into normal successor. 1146 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(), 1147 DAG.getBasicBlock(Return))); 1148 } 1149} 1150 1151void SelectionDAGLowering::visitUnwind(UnwindInst &I) { 1152} 1153 1154void SelectionDAGLowering::visitSwitch(SwitchInst &I) { 1155 // Figure out which block is immediately after the current one. 1156 MachineBasicBlock *NextBlock = 0; 1157 MachineFunction::iterator BBI = CurMBB; 1158 1159 if (++BBI != CurMBB->getParent()->end()) 1160 NextBlock = BBI; 1161 1162 MachineBasicBlock *Default = FuncInfo.MBBMap[I.getDefaultDest()]; 1163 1164 // If there is only the default destination, branch to it if it is not the 1165 // next basic block. Otherwise, just fall through. 1166 if (I.getNumOperands() == 2) { 1167 // Update machine-CFG edges. 1168 1169 // If this is not a fall-through branch, emit the branch. 
1170 if (Default != NextBlock) 1171 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(), 1172 DAG.getBasicBlock(Default))); 1173 1174 CurMBB->addSuccessor(Default); 1175 return; 1176 } 1177 1178 // If there are any non-default case statements, create a vector of Cases 1179 // representing each one, and sort the vector so that we can efficiently 1180 // create a binary search tree from them. 1181 std::vector<Case> Cases; 1182 1183 for (unsigned i = 1; i < I.getNumSuccessors(); ++i) { 1184 MachineBasicBlock *SMBB = FuncInfo.MBBMap[I.getSuccessor(i)]; 1185 Cases.push_back(Case(I.getSuccessorValue(i), SMBB)); 1186 } 1187 1188 std::sort(Cases.begin(), Cases.end(), CaseCmp()); 1189 1190 // Get the Value to be switched on and default basic blocks, which will be 1191 // inserted into CaseBlock records, representing basic blocks in the binary 1192 // search tree. 1193 Value *SV = I.getOperand(0); 1194 1195 // Get the MachineFunction which holds the current MBB. This is used during 1196 // emission of jump tables, and when inserting any additional MBBs necessary 1197 // to represent the switch. 1198 MachineFunction *CurMF = CurMBB->getParent(); 1199 const BasicBlock *LLVMBB = CurMBB->getBasicBlock(); 1200 1201 // If the switch has few cases (two or less) emit a series of specific 1202 // tests. 1203 if (Cases.size() < 3) { 1204 // TODO: If any two of the cases has the same destination, and if one value 1205 // is the same as the other, but has one bit unset that the other has set, 1206 // use bit manipulation to do two compares at once. For example: 1207 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)" 1208 1209 // Rearrange the case blocks so that the last one falls through if possible. 1210 if (NextBlock && Default != NextBlock && Cases.back().second != NextBlock) { 1211 // The last case block won't fall through into 'NextBlock' if we emit the 1212 // branches in this order. See if rearranging a case value would help. 1213 for (unsigned i = 0, e = Cases.size()-1; i != e; ++i) { 1214 if (Cases[i].second == NextBlock) { 1215 std::swap(Cases[i], Cases.back()); 1216 break; 1217 } 1218 } 1219 } 1220 1221 // Create a CaseBlock record representing a conditional branch to 1222 // the Case's target mbb if the value being switched on SV is equal 1223 // to C. 1224 MachineBasicBlock *CurBlock = CurMBB; 1225 for (unsigned i = 0, e = Cases.size(); i != e; ++i) { 1226 MachineBasicBlock *FallThrough; 1227 if (i != e-1) { 1228 FallThrough = new MachineBasicBlock(CurMBB->getBasicBlock()); 1229 CurMF->getBasicBlockList().insert(BBI, FallThrough); 1230 } else { 1231 // If the last case doesn't match, go to the default block. 1232 FallThrough = Default; 1233 } 1234 1235 SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, Cases[i].first, 1236 Cases[i].second, FallThrough, CurBlock); 1237 1238 // If emitting the first comparison, just call visitSwitchCase to emit the 1239 // code into the current block. Otherwise, push the CaseBlock onto the 1240 // vector to be later processed by SDISel, and insert the node's MBB 1241 // before the next MBB. 1242 if (CurBlock == CurMBB) 1243 visitSwitchCase(CB); 1244 else 1245 SwitchCases.push_back(CB); 1246 1247 CurBlock = FallThrough; 1248 } 1249 return; 1250 } 1251 1252 // If the switch has more than 5 blocks, and at least 31.25% dense, and the 1253 // target supports indirect branches, then emit a jump table rather than 1254 // lowering the switch to a binary tree of conditional branches. 
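  // Illustrative numbers only: 8 cases covering the values 10..31 give a
  // density of 8 / (31 - 10 + 1) = 0.36 >= 31.25%, so a 22-entry jump table
  // would be emitted, with unused slots branching to the default block.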
1255 if ((TLI.isOperationLegal(ISD::BR_JT, MVT::Other) || 1256 TLI.isOperationLegal(ISD::BRIND, MVT::Other)) && 1257 Cases.size() > 5) { 1258 uint64_t First =cast<ConstantInt>(Cases.front().first)->getSExtValue(); 1259 uint64_t Last = cast<ConstantInt>(Cases.back().first)->getSExtValue(); 1260 double Density = (double)Cases.size() / (double)((Last - First) + 1ULL); 1261 1262 if (Density >= 0.3125) { 1263 // Create a new basic block to hold the code for loading the address 1264 // of the jump table, and jumping to it. Update successor information; 1265 // we will either branch to the default case for the switch, or the jump 1266 // table. 1267 MachineBasicBlock *JumpTableBB = new MachineBasicBlock(LLVMBB); 1268 CurMF->getBasicBlockList().insert(BBI, JumpTableBB); 1269 CurMBB->addSuccessor(Default); 1270 CurMBB->addSuccessor(JumpTableBB); 1271 1272 // Subtract the lowest switch case value from the value being switched on 1273 // and conditional branch to default mbb if the result is greater than the 1274 // difference between smallest and largest cases. 1275 SDOperand SwitchOp = getValue(SV); 1276 MVT::ValueType VT = SwitchOp.getValueType(); 1277 SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp, 1278 DAG.getConstant(First, VT)); 1279 1280 // The SDNode we just created, which holds the value being switched on 1281 // minus the the smallest case value, needs to be copied to a virtual 1282 // register so it can be used as an index into the jump table in a 1283 // subsequent basic block. This value may be smaller or larger than the 1284 // target's pointer type, and therefore require extension or truncating. 1285 if (VT > TLI.getPointerTy()) 1286 SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB); 1287 else 1288 SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB); 1289 1290 unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy()); 1291 SDOperand CopyTo = DAG.getCopyToReg(getRoot(), JumpTableReg, SwitchOp); 1292 1293 // Emit the range check for the jump table, and branch to the default 1294 // block for the switch statement if the value being switched on exceeds 1295 // the largest case in the switch. 1296 SDOperand CMP = DAG.getSetCC(TLI.getSetCCResultTy(), SUB, 1297 DAG.getConstant(Last-First,VT), ISD::SETUGT); 1298 DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP, 1299 DAG.getBasicBlock(Default))); 1300 1301 // Build a vector of destination BBs, corresponding to each target 1302 // of the jump table. If the value of the jump table slot corresponds to 1303 // a case statement, push the case's BB onto the vector, otherwise, push 1304 // the default BB. 1305 std::vector<MachineBasicBlock*> DestBBs; 1306 int64_t TEI = First; 1307 for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++TEI) 1308 if (cast<ConstantInt>(ii->first)->getSExtValue() == TEI) { 1309 DestBBs.push_back(ii->second); 1310 ++ii; 1311 } else { 1312 DestBBs.push_back(Default); 1313 } 1314 1315 // Update successor info. Add one edge to each unique successor. 1316 // Vector bool would be better, but vector<bool> is really slow. 1317 std::vector<unsigned char> SuccsHandled; 1318 SuccsHandled.resize(CurMBB->getParent()->getNumBlockIDs()); 1319 1320 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(), 1321 E = DestBBs.end(); I != E; ++I) { 1322 if (!SuccsHandled[(*I)->getNumber()]) { 1323 SuccsHandled[(*I)->getNumber()] = true; 1324 JumpTableBB->addSuccessor(*I); 1325 } 1326 } 1327 1328 // Create a jump table index for this jump table, or return an existing 1329 // one. 
1330 unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs); 1331 1332 // Set the jump table information so that we can codegen it as a second 1333 // MachineBasicBlock 1334 JT.Reg = JumpTableReg; 1335 JT.JTI = JTI; 1336 JT.MBB = JumpTableBB; 1337 JT.Default = Default; 1338 return; 1339 } 1340 } 1341 1342 // Push the initial CaseRec onto the worklist 1343 std::vector<CaseRec> CaseVec; 1344 CaseVec.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end()))); 1345 1346 while (!CaseVec.empty()) { 1347 // Grab a record representing a case range to process off the worklist 1348 CaseRec CR = CaseVec.back(); 1349 CaseVec.pop_back(); 1350 1351 // Size is the number of Cases represented by this range. If Size is 1, 1352 // then we are processing a leaf of the binary search tree. Otherwise, 1353 // we need to pick a pivot, and push left and right ranges onto the 1354 // worklist. 1355 unsigned Size = CR.Range.second - CR.Range.first; 1356 1357 if (Size == 1) { 1358 // Create a CaseBlock record representing a conditional branch to 1359 // the Case's target mbb if the value being switched on SV is equal 1360 // to C. Otherwise, branch to default. 1361 Constant *C = CR.Range.first->first; 1362 MachineBasicBlock *Target = CR.Range.first->second; 1363 SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, C, Target, Default, 1364 CR.CaseBB); 1365 1366 // If the MBB representing the leaf node is the current MBB, then just 1367 // call visitSwitchCase to emit the code into the current block. 1368 // Otherwise, push the CaseBlock onto the vector to be later processed 1369 // by SDISel, and insert the node's MBB before the next MBB. 1370 if (CR.CaseBB == CurMBB) 1371 visitSwitchCase(CB); 1372 else 1373 SwitchCases.push_back(CB); 1374 } else { 1375 // split case range at pivot 1376 CaseItr Pivot = CR.Range.first + (Size / 2); 1377 CaseRange LHSR(CR.Range.first, Pivot); 1378 CaseRange RHSR(Pivot, CR.Range.second); 1379 Constant *C = Pivot->first; 1380 MachineBasicBlock *FalseBB = 0, *TrueBB = 0; 1381 1382 // We know that we branch to the LHS if the Value being switched on is 1383 // less than the Pivot value, C. We use this to optimize our binary 1384 // tree a bit, by recognizing that if SV is greater than or equal to the 1385 // LHS's Case Value, and that Case Value is exactly one less than the 1386 // Pivot's Value, then we can branch directly to the LHS's Target, 1387 // rather than creating a leaf node for it. 1388 if ((LHSR.second - LHSR.first) == 1 && 1389 LHSR.first->first == CR.GE && 1390 cast<ConstantInt>(C)->getZExtValue() == 1391 (cast<ConstantInt>(CR.GE)->getZExtValue() + 1ULL)) { 1392 TrueBB = LHSR.first->second; 1393 } else { 1394 TrueBB = new MachineBasicBlock(LLVMBB); 1395 CurMF->getBasicBlockList().insert(BBI, TrueBB); 1396 CaseVec.push_back(CaseRec(TrueBB, C, CR.GE, LHSR)); 1397 } 1398 1399 // Similar to the optimization above, if the Value being switched on is 1400 // known to be less than the Constant CR.LT, and the current Case Value 1401 // is CR.LT - 1, then we can branch directly to the target block for 1402 // the current Case Value, rather than emitting a RHS leaf node for it. 
1403 if ((RHSR.second - RHSR.first) == 1 && CR.LT && 1404 cast<ConstantInt>(RHSR.first->first)->getZExtValue() == 1405 (cast<ConstantInt>(CR.LT)->getZExtValue() - 1ULL)) { 1406 FalseBB = RHSR.first->second; 1407 } else { 1408 FalseBB = new MachineBasicBlock(LLVMBB); 1409 CurMF->getBasicBlockList().insert(BBI, FalseBB); 1410 CaseVec.push_back(CaseRec(FalseBB,CR.LT,C,RHSR)); 1411 } 1412 1413 // Create a CaseBlock record representing a conditional branch to 1414 // the LHS node if the value being switched on SV is less than C. 1415 // Otherwise, branch to LHS. 1416 SelectionDAGISel::CaseBlock CB(ISD::SETLT, SV, C, TrueBB, FalseBB, 1417 CR.CaseBB); 1418 1419 if (CR.CaseBB == CurMBB) 1420 visitSwitchCase(CB); 1421 else 1422 SwitchCases.push_back(CB); 1423 } 1424 } 1425} 1426 1427void SelectionDAGLowering::visitSub(User &I) { 1428 // -0.0 - X --> fneg 1429 const Type *Ty = I.getType(); 1430 if (isa<VectorType>(Ty)) { 1431 visitVectorBinary(I, ISD::VSUB); 1432 } else if (Ty->isFloatingPoint()) { 1433 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0))) 1434 if (CFP->isExactlyValue(-0.0)) { 1435 SDOperand Op2 = getValue(I.getOperand(1)); 1436 setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2)); 1437 return; 1438 } 1439 visitScalarBinary(I, ISD::FSUB); 1440 } else 1441 visitScalarBinary(I, ISD::SUB); 1442} 1443 1444void SelectionDAGLowering::visitScalarBinary(User &I, unsigned OpCode) { 1445 SDOperand Op1 = getValue(I.getOperand(0)); 1446 SDOperand Op2 = getValue(I.getOperand(1)); 1447 1448 setValue(&I, DAG.getNode(OpCode, Op1.getValueType(), Op1, Op2)); 1449} 1450 1451void 1452SelectionDAGLowering::visitVectorBinary(User &I, unsigned OpCode) { 1453 assert(isa<VectorType>(I.getType())); 1454 const VectorType *Ty = cast<VectorType>(I.getType()); 1455 SDOperand Typ = DAG.getValueType(TLI.getValueType(Ty->getElementType())); 1456 1457 setValue(&I, DAG.getNode(OpCode, MVT::Vector, 1458 getValue(I.getOperand(0)), 1459 getValue(I.getOperand(1)), 1460 DAG.getConstant(Ty->getNumElements(), MVT::i32), 1461 Typ)); 1462} 1463 1464void SelectionDAGLowering::visitEitherBinary(User &I, unsigned ScalarOp, 1465 unsigned VectorOp) { 1466 if (isa<VectorType>(I.getType())) 1467 visitVectorBinary(I, VectorOp); 1468 else 1469 visitScalarBinary(I, ScalarOp); 1470} 1471 1472void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) { 1473 SDOperand Op1 = getValue(I.getOperand(0)); 1474 SDOperand Op2 = getValue(I.getOperand(1)); 1475 1476 if (TLI.getShiftAmountTy() < Op2.getValueType()) 1477 Op2 = DAG.getNode(ISD::TRUNCATE, TLI.getShiftAmountTy(), Op2); 1478 else if (TLI.getShiftAmountTy() > Op2.getValueType()) 1479 Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2); 1480 1481 setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2)); 1482} 1483 1484void SelectionDAGLowering::visitICmp(User &I) { 1485 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE; 1486 if (ICmpInst *IC = dyn_cast<ICmpInst>(&I)) 1487 predicate = IC->getPredicate(); 1488 else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I)) 1489 predicate = ICmpInst::Predicate(IC->getPredicate()); 1490 SDOperand Op1 = getValue(I.getOperand(0)); 1491 SDOperand Op2 = getValue(I.getOperand(1)); 1492 ISD::CondCode Opcode; 1493 switch (predicate) { 1494 case ICmpInst::ICMP_EQ : Opcode = ISD::SETEQ; break; 1495 case ICmpInst::ICMP_NE : Opcode = ISD::SETNE; break; 1496 case ICmpInst::ICMP_UGT : Opcode = ISD::SETUGT; break; 1497 case ICmpInst::ICMP_UGE : Opcode = ISD::SETUGE; break; 1498 case ICmpInst::ICMP_ULT : Opcode = 
ISD::SETULT; break; 1499 case ICmpInst::ICMP_ULE : Opcode = ISD::SETULE; break; 1500 case ICmpInst::ICMP_SGT : Opcode = ISD::SETGT; break; 1501 case ICmpInst::ICMP_SGE : Opcode = ISD::SETGE; break; 1502 case ICmpInst::ICMP_SLT : Opcode = ISD::SETLT; break; 1503 case ICmpInst::ICMP_SLE : Opcode = ISD::SETLE; break; 1504 default: 1505 assert(!"Invalid ICmp predicate value"); 1506 Opcode = ISD::SETEQ; 1507 break; 1508 } 1509 setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode)); 1510} 1511 1512void SelectionDAGLowering::visitFCmp(User &I) { 1513 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE; 1514 if (FCmpInst *FC = dyn_cast<FCmpInst>(&I)) 1515 predicate = FC->getPredicate(); 1516 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I)) 1517 predicate = FCmpInst::Predicate(FC->getPredicate()); 1518 SDOperand Op1 = getValue(I.getOperand(0)); 1519 SDOperand Op2 = getValue(I.getOperand(1)); 1520 ISD::CondCode Condition, FOC, FPC; 1521 switch (predicate) { 1522 case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break; 1523 case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break; 1524 case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break; 1525 case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break; 1526 case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break; 1527 case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break; 1528 case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break; 1529 case FCmpInst::FCMP_ORD: FOC = ISD::SETEQ; FPC = ISD::SETO; break; 1530 case FCmpInst::FCMP_UNO: FOC = ISD::SETNE; FPC = ISD::SETUO; break; 1531 case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break; 1532 case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break; 1533 case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break; 1534 case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break; 1535 case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break; 1536 case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break; 1537 case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break; 1538 default: 1539 assert(!"Invalid FCmp predicate value"); 1540 FOC = FPC = ISD::SETFALSE; 1541 break; 1542 } 1543 if (FiniteOnlyFPMath()) 1544 Condition = FOC; 1545 else 1546 Condition = FPC; 1547 setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Condition)); 1548} 1549 1550void SelectionDAGLowering::visitSelect(User &I) { 1551 SDOperand Cond = getValue(I.getOperand(0)); 1552 SDOperand TrueVal = getValue(I.getOperand(1)); 1553 SDOperand FalseVal = getValue(I.getOperand(2)); 1554 if (!isa<VectorType>(I.getType())) { 1555 setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond, 1556 TrueVal, FalseVal)); 1557 } else { 1558 setValue(&I, DAG.getNode(ISD::VSELECT, MVT::Vector, Cond, TrueVal, FalseVal, 1559 *(TrueVal.Val->op_end()-2), 1560 *(TrueVal.Val->op_end()-1))); 1561 } 1562} 1563 1564 1565void SelectionDAGLowering::visitTrunc(User &I) { 1566 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest). 1567 SDOperand N = getValue(I.getOperand(0)); 1568 MVT::ValueType DestVT = TLI.getValueType(I.getType()); 1569 setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N)); 1570} 1571 1572void SelectionDAGLowering::visitZExt(User &I) { 1573 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest). 1574 // ZExt also can't be a cast to bool for same reason. 
So, nothing much to do.
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
}

void SelectionDAGLowering::visitSExt(User &I) {
  // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
  // SExt also can't be a cast to bool for the same reason. So, nothing much to do.
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
}

void SelectionDAGLowering::visitFPTrunc(User &I) {
  // FPTrunc is never a no-op cast, no need to check
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N));
}

void SelectionDAGLowering::visitFPExt(User &I){
  // FPExt is never a no-op cast, no need to check
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
}

void SelectionDAGLowering::visitFPToUI(User &I) {
  // FPToUI is never a no-op cast, no need to check
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
}

void SelectionDAGLowering::visitFPToSI(User &I) {
  // FPToSI is never a no-op cast, no need to check
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
}

void SelectionDAGLowering::visitUIToFP(User &I) {
  // UIToFP is never a no-op cast, no need to check
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
}

void SelectionDAGLowering::visitSIToFP(User &I){
  // SIToFP is never a no-op cast, no need to check
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType DestVT = TLI.getValueType(I.getType());
  setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
}

void SelectionDAGLowering::visitPtrToInt(User &I) {
  // What to do depends on the size of the integer and the size of the pointer.
  // We can either truncate, zero extend, or no-op, accordingly.
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType SrcVT = N.getValueType();
  MVT::ValueType DestVT = TLI.getValueType(I.getType());
  SDOperand Result;
  if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(SrcVT))
    Result = DAG.getNode(ISD::TRUNCATE, DestVT, N);
  else
    // Note: ZERO_EXTEND can handle cases where the sizes are equal too
    Result = DAG.getNode(ISD::ZERO_EXTEND, DestVT, N);
  setValue(&I, Result);
}

void SelectionDAGLowering::visitIntToPtr(User &I) {
  // What to do depends on the size of the integer and the size of the pointer.
  // We can either truncate, zero extend, or no-op, accordingly.
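  // For example, an i64 value cast to a pointer on a 32-bit target gets
  // truncated, while an i16 value gets zero extended to pointer width.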
1648 SDOperand N = getValue(I.getOperand(0)); 1649 MVT::ValueType SrcVT = N.getValueType(); 1650 MVT::ValueType DestVT = TLI.getValueType(I.getType()); 1651 if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(SrcVT)) 1652 setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N)); 1653 else 1654 // Note: ZERO_EXTEND can handle cases where the sizes are equal too 1655 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N)); 1656} 1657 1658void SelectionDAGLowering::visitBitCast(User &I) { 1659 SDOperand N = getValue(I.getOperand(0)); 1660 MVT::ValueType DestVT = TLI.getValueType(I.getType()); 1661 if (DestVT == MVT::Vector) { 1662 // This is a cast to a vector from something else. 1663 // Get information about the output vector. 1664 const VectorType *DestTy = cast<VectorType>(I.getType()); 1665 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType()); 1666 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N, 1667 DAG.getConstant(DestTy->getNumElements(),MVT::i32), 1668 DAG.getValueType(EltVT))); 1669 return; 1670 } 1671 MVT::ValueType SrcVT = N.getValueType(); 1672 if (SrcVT == MVT::Vector) { 1673 // This is a cast from a vctor to something else. 1674 // Get information about the input vector. 1675 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N)); 1676 return; 1677 } 1678 1679 // BitCast assures us that source and destination are the same size so this 1680 // is either a BIT_CONVERT or a no-op. 1681 if (DestVT != N.getValueType()) 1682 setValue(&I, DAG.getNode(ISD::BIT_CONVERT, DestVT, N)); // convert types 1683 else 1684 setValue(&I, N); // noop cast. 1685} 1686 1687void SelectionDAGLowering::visitInsertElement(User &I) { 1688 SDOperand InVec = getValue(I.getOperand(0)); 1689 SDOperand InVal = getValue(I.getOperand(1)); 1690 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), 1691 getValue(I.getOperand(2))); 1692 1693 SDOperand Num = *(InVec.Val->op_end()-2); 1694 SDOperand Typ = *(InVec.Val->op_end()-1); 1695 setValue(&I, DAG.getNode(ISD::VINSERT_VECTOR_ELT, MVT::Vector, 1696 InVec, InVal, InIdx, Num, Typ)); 1697} 1698 1699void SelectionDAGLowering::visitExtractElement(User &I) { 1700 SDOperand InVec = getValue(I.getOperand(0)); 1701 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), 1702 getValue(I.getOperand(1))); 1703 SDOperand Typ = *(InVec.Val->op_end()-1); 1704 setValue(&I, DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, 1705 TLI.getValueType(I.getType()), InVec, InIdx)); 1706} 1707 1708void SelectionDAGLowering::visitShuffleVector(User &I) { 1709 SDOperand V1 = getValue(I.getOperand(0)); 1710 SDOperand V2 = getValue(I.getOperand(1)); 1711 SDOperand Mask = getValue(I.getOperand(2)); 1712 1713 SDOperand Num = *(V1.Val->op_end()-2); 1714 SDOperand Typ = *(V2.Val->op_end()-1); 1715 setValue(&I, DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector, 1716 V1, V2, Mask, Num, Typ)); 1717} 1718 1719 1720void SelectionDAGLowering::visitGetElementPtr(User &I) { 1721 SDOperand N = getValue(I.getOperand(0)); 1722 const Type *Ty = I.getOperand(0)->getType(); 1723 1724 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end(); 1725 OI != E; ++OI) { 1726 Value *Idx = *OI; 1727 if (const StructType *StTy = dyn_cast<StructType>(Ty)) { 1728 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue(); 1729 if (Field) { 1730 // N = N + Offset 1731 uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field); 1732 N = DAG.getNode(ISD::ADD, N.getValueType(), N, 1733 getIntPtrConstant(Offset)); 1734 } 1735 Ty = StTy->getElementType(Field); 1736 } else { 1737 Ty 
= cast<SequentialType>(Ty)->getElementType(); 1738 1739 // If this is a constant subscript, handle it quickly. 1740 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) { 1741 if (CI->getZExtValue() == 0) continue; 1742 uint64_t Offs = 1743 TD->getTypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue(); 1744 N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs)); 1745 continue; 1746 } 1747 1748 // N = N + Idx * ElementSize; 1749 uint64_t ElementSize = TD->getTypeSize(Ty); 1750 SDOperand IdxN = getValue(Idx); 1751 1752 // If the index is smaller or larger than intptr_t, truncate or extend 1753 // it. 1754 if (IdxN.getValueType() < N.getValueType()) { 1755 IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN); 1756 } else if (IdxN.getValueType() > N.getValueType()) 1757 IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN); 1758 1759 // If this is a multiply by a power of two, turn it into a shl 1760 // immediately. This is a very common case. 1761 if (isPowerOf2_64(ElementSize)) { 1762 unsigned Amt = Log2_64(ElementSize); 1763 IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN, 1764 DAG.getConstant(Amt, TLI.getShiftAmountTy())); 1765 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN); 1766 continue; 1767 } 1768 1769 SDOperand Scale = getIntPtrConstant(ElementSize); 1770 IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale); 1771 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN); 1772 } 1773 } 1774 setValue(&I, N); 1775} 1776 1777void SelectionDAGLowering::visitAlloca(AllocaInst &I) { 1778 // If this is a fixed sized alloca in the entry block of the function, 1779 // allocate it statically on the stack. 1780 if (FuncInfo.StaticAllocaMap.count(&I)) 1781 return; // getValue will auto-populate this. 1782 1783 const Type *Ty = I.getAllocatedType(); 1784 uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty); 1785 unsigned Align = 1786 std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), 1787 I.getAlignment()); 1788 1789 SDOperand AllocSize = getValue(I.getArraySize()); 1790 MVT::ValueType IntPtr = TLI.getPointerTy(); 1791 if (IntPtr < AllocSize.getValueType()) 1792 AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize); 1793 else if (IntPtr > AllocSize.getValueType()) 1794 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize); 1795 1796 AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize, 1797 getIntPtrConstant(TySize)); 1798 1799 // Handle alignment. If the requested alignment is less than or equal to the 1800 // stack alignment, ignore it and round the size of the allocation up to the 1801 // stack alignment size. If the size is greater than the stack alignment, we 1802 // note this in the DYNAMIC_STACKALLOC node. 1803 unsigned StackAlign = 1804 TLI.getTargetMachine().getFrameInfo()->getStackAlignment(); 1805 if (Align <= StackAlign) { 1806 Align = 0; 1807 // Add SA-1 to the size. 1808 AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize, 1809 getIntPtrConstant(StackAlign-1)); 1810 // Mask out the low bits for alignment purposes. 
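    // Combined with the ADD above, this is the usual round-up computation:
    //   AllocSize = (AllocSize + StackAlign - 1) & ~(StackAlign - 1)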
1811 AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize, 1812 getIntPtrConstant(~(uint64_t)(StackAlign-1))); 1813 } 1814 1815 SDOperand Ops[] = { getRoot(), AllocSize, getIntPtrConstant(Align) }; 1816 const MVT::ValueType *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(), 1817 MVT::Other); 1818 SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, 2, Ops, 3); 1819 setValue(&I, DSA); 1820 DAG.setRoot(DSA.getValue(1)); 1821 1822 // Inform the Frame Information that we have just allocated a variable-sized 1823 // object. 1824 CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject(); 1825} 1826 1827void SelectionDAGLowering::visitLoad(LoadInst &I) { 1828 SDOperand Ptr = getValue(I.getOperand(0)); 1829 1830 SDOperand Root; 1831 if (I.isVolatile()) 1832 Root = getRoot(); 1833 else { 1834 // Do not serialize non-volatile loads against each other. 1835 Root = DAG.getRoot(); 1836 } 1837 1838 setValue(&I, getLoadFrom(I.getType(), Ptr, I.getOperand(0), 1839 Root, I.isVolatile())); 1840} 1841 1842SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr, 1843 const Value *SV, SDOperand Root, 1844 bool isVolatile) { 1845 SDOperand L; 1846 if (const VectorType *PTy = dyn_cast<VectorType>(Ty)) { 1847 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType()); 1848 L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr, 1849 DAG.getSrcValue(SV)); 1850 } else { 1851 L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SV, 0, isVolatile); 1852 } 1853 1854 if (isVolatile) 1855 DAG.setRoot(L.getValue(1)); 1856 else 1857 PendingLoads.push_back(L.getValue(1)); 1858 1859 return L; 1860} 1861 1862 1863void SelectionDAGLowering::visitStore(StoreInst &I) { 1864 Value *SrcV = I.getOperand(0); 1865 SDOperand Src = getValue(SrcV); 1866 SDOperand Ptr = getValue(I.getOperand(1)); 1867 DAG.setRoot(DAG.getStore(getRoot(), Src, Ptr, I.getOperand(1), 0, 1868 I.isVolatile())); 1869} 1870 1871/// IntrinsicCannotAccessMemory - Return true if the specified intrinsic cannot 1872/// access memory and has no other side effects at all. 1873static bool IntrinsicCannotAccessMemory(unsigned IntrinsicID) { 1874#define GET_NO_MEMORY_INTRINSICS 1875#include "llvm/Intrinsics.gen" 1876#undef GET_NO_MEMORY_INTRINSICS 1877 return false; 1878} 1879 1880// IntrinsicOnlyReadsMemory - Return true if the specified intrinsic doesn't 1881// have any side-effects or if it only reads memory. 1882static bool IntrinsicOnlyReadsMemory(unsigned IntrinsicID) { 1883#define GET_SIDE_EFFECT_INFO 1884#include "llvm/Intrinsics.gen" 1885#undef GET_SIDE_EFFECT_INFO 1886 return false; 1887} 1888 1889/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC 1890/// node. 1891void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I, 1892 unsigned Intrinsic) { 1893 bool HasChain = !IntrinsicCannotAccessMemory(Intrinsic); 1894 bool OnlyLoad = HasChain && IntrinsicOnlyReadsMemory(Intrinsic); 1895 1896 // Build the operand list. 1897 SmallVector<SDOperand, 8> Ops; 1898 if (HasChain) { // If this intrinsic has side-effects, chainify it. 1899 if (OnlyLoad) { 1900 // We don't need to serialize loads against other loads. 1901 Ops.push_back(DAG.getRoot()); 1902 } else { 1903 Ops.push_back(getRoot()); 1904 } 1905 } 1906 1907 // Add the intrinsic ID as an integer operand. 1908 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy())); 1909 1910 // Add all operands of the call to the operand list. 
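  // Operand 0 of the CallInst is the callee itself, so the intrinsic's
  // arguments start at operand 1.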
1911 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) { 1912 SDOperand Op = getValue(I.getOperand(i)); 1913 1914 // If this is a vector type, force it to the right vector type. 1915 if (Op.getValueType() == MVT::Vector) { 1916 const VectorType *OpTy = cast<VectorType>(I.getOperand(i)->getType()); 1917 MVT::ValueType EltVT = TLI.getValueType(OpTy->getElementType()); 1918 1919 MVT::ValueType VVT = MVT::getVectorType(EltVT, OpTy->getNumElements()); 1920 assert(VVT != MVT::Other && "Intrinsic uses a non-legal type?"); 1921 Op = DAG.getNode(ISD::VBIT_CONVERT, VVT, Op); 1922 } 1923 1924 assert(TLI.isTypeLegal(Op.getValueType()) && 1925 "Intrinsic uses a non-legal type?"); 1926 Ops.push_back(Op); 1927 } 1928 1929 std::vector<MVT::ValueType> VTs; 1930 if (I.getType() != Type::VoidTy) { 1931 MVT::ValueType VT = TLI.getValueType(I.getType()); 1932 if (VT == MVT::Vector) { 1933 const VectorType *DestTy = cast<VectorType>(I.getType()); 1934 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType()); 1935 1936 VT = MVT::getVectorType(EltVT, DestTy->getNumElements()); 1937 assert(VT != MVT::Other && "Intrinsic uses a non-legal type?"); 1938 } 1939 1940 assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?"); 1941 VTs.push_back(VT); 1942 } 1943 if (HasChain) 1944 VTs.push_back(MVT::Other); 1945 1946 const MVT::ValueType *VTList = DAG.getNodeValueTypes(VTs); 1947 1948 // Create the node. 1949 SDOperand Result; 1950 if (!HasChain) 1951 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTList, VTs.size(), 1952 &Ops[0], Ops.size()); 1953 else if (I.getType() != Type::VoidTy) 1954 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTList, VTs.size(), 1955 &Ops[0], Ops.size()); 1956 else 1957 Result = DAG.getNode(ISD::INTRINSIC_VOID, VTList, VTs.size(), 1958 &Ops[0], Ops.size()); 1959 1960 if (HasChain) { 1961 SDOperand Chain = Result.getValue(Result.Val->getNumValues()-1); 1962 if (OnlyLoad) 1963 PendingLoads.push_back(Chain); 1964 else 1965 DAG.setRoot(Chain); 1966 } 1967 if (I.getType() != Type::VoidTy) { 1968 if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) { 1969 MVT::ValueType EVT = TLI.getValueType(PTy->getElementType()); 1970 Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result, 1971 DAG.getConstant(PTy->getNumElements(), MVT::i32), 1972 DAG.getValueType(EVT)); 1973 } 1974 setValue(&I, Result); 1975 } 1976} 1977 1978/// visitIntrinsicCall - Lower the call to the specified intrinsic function. If 1979/// we want to emit this as a call to a named external function, return the name 1980/// otherwise lower it and return null. 1981const char * 1982SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { 1983 switch (Intrinsic) { 1984 default: 1985 // By default, turn this into a target intrinsic node. 
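    // visitTargetIntrinsic emits an INTRINSIC_WO_CHAIN, INTRINSIC_W_CHAIN or
    // INTRINSIC_VOID node, depending on side effects and the return type.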
1986 visitTargetIntrinsic(I, Intrinsic); 1987 return 0; 1988 case Intrinsic::vastart: visitVAStart(I); return 0; 1989 case Intrinsic::vaend: visitVAEnd(I); return 0; 1990 case Intrinsic::vacopy: visitVACopy(I); return 0; 1991 case Intrinsic::returnaddress: 1992 setValue(&I, DAG.getNode(ISD::RETURNADDR, TLI.getPointerTy(), 1993 getValue(I.getOperand(1)))); 1994 return 0; 1995 case Intrinsic::frameaddress: 1996 setValue(&I, DAG.getNode(ISD::FRAMEADDR, TLI.getPointerTy(), 1997 getValue(I.getOperand(1)))); 1998 return 0; 1999 case Intrinsic::setjmp: 2000 return "_setjmp"+!TLI.usesUnderscoreSetJmp(); 2001 break; 2002 case Intrinsic::longjmp: 2003 return "_longjmp"+!TLI.usesUnderscoreLongJmp(); 2004 break; 2005 case Intrinsic::memcpy_i32: 2006 case Intrinsic::memcpy_i64: 2007 visitMemIntrinsic(I, ISD::MEMCPY); 2008 return 0; 2009 case Intrinsic::memset_i32: 2010 case Intrinsic::memset_i64: 2011 visitMemIntrinsic(I, ISD::MEMSET); 2012 return 0; 2013 case Intrinsic::memmove_i32: 2014 case Intrinsic::memmove_i64: 2015 visitMemIntrinsic(I, ISD::MEMMOVE); 2016 return 0; 2017 2018 case Intrinsic::dbg_stoppoint: { 2019 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 2020 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I); 2021 if (MMI && SPI.getContext() && MMI->Verify(SPI.getContext())) { 2022 SDOperand Ops[5]; 2023 2024 Ops[0] = getRoot(); 2025 Ops[1] = getValue(SPI.getLineValue()); 2026 Ops[2] = getValue(SPI.getColumnValue()); 2027 2028 DebugInfoDesc *DD = MMI->getDescFor(SPI.getContext()); 2029 assert(DD && "Not a debug information descriptor"); 2030 CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD); 2031 2032 Ops[3] = DAG.getString(CompileUnit->getFileName()); 2033 Ops[4] = DAG.getString(CompileUnit->getDirectory()); 2034 2035 DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops, 5)); 2036 } 2037 2038 return 0; 2039 } 2040 case Intrinsic::dbg_region_start: { 2041 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 2042 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I); 2043 if (MMI && RSI.getContext() && MMI->Verify(RSI.getContext())) { 2044 unsigned LabelID = MMI->RecordRegionStart(RSI.getContext()); 2045 DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other, getRoot(), 2046 DAG.getConstant(LabelID, MVT::i32))); 2047 } 2048 2049 return 0; 2050 } 2051 case Intrinsic::dbg_region_end: { 2052 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 2053 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I); 2054 if (MMI && REI.getContext() && MMI->Verify(REI.getContext())) { 2055 unsigned LabelID = MMI->RecordRegionEnd(REI.getContext()); 2056 DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other, 2057 getRoot(), DAG.getConstant(LabelID, MVT::i32))); 2058 } 2059 2060 return 0; 2061 } 2062 case Intrinsic::dbg_func_start: { 2063 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 2064 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I); 2065 if (MMI && FSI.getSubprogram() && 2066 MMI->Verify(FSI.getSubprogram())) { 2067 unsigned LabelID = MMI->RecordRegionStart(FSI.getSubprogram()); 2068 DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other, 2069 getRoot(), DAG.getConstant(LabelID, MVT::i32))); 2070 } 2071 2072 return 0; 2073 } 2074 case Intrinsic::dbg_declare: { 2075 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 2076 DbgDeclareInst &DI = cast<DbgDeclareInst>(I); 2077 if (MMI && DI.getVariable() && MMI->Verify(DI.getVariable())) { 2078 SDOperand AddressOp = getValue(DI.getAddress()); 2079 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp)) 2080 MMI->RecordVariable(DI.getVariable(), 
FI->getIndex()); 2081 } 2082 2083 return 0; 2084 } 2085 2086 case Intrinsic::eh_exception: { 2087 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 2088 2089 if (MMI) { 2090 // Add a label to mark the beginning of the landing pad. Deletion of the 2091 // landing pad can thus be detected via the MachineModuleInfo. 2092 unsigned LabelID = MMI->addLandingPad(CurMBB); 2093 DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other, DAG.getEntryNode(), 2094 DAG.getConstant(LabelID, MVT::i32))); 2095 2096 // Mark exception register as live in. 2097 unsigned Reg = TLI.getExceptionAddressRegister(); 2098 if (Reg) CurMBB->addLiveIn(Reg); 2099 2100 // Insert the EXCEPTIONADDR instruction. 2101 SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other); 2102 SDOperand Ops[1]; 2103 Ops[0] = DAG.getRoot(); 2104 SDOperand Op = DAG.getNode(ISD::EXCEPTIONADDR, VTs, Ops, 1); 2105 setValue(&I, Op); 2106 DAG.setRoot(Op.getValue(1)); 2107 } else { 2108 setValue(&I, DAG.getConstant(0, TLI.getPointerTy())); 2109 } 2110 return 0; 2111 } 2112 2113 case Intrinsic::eh_selector: 2114 case Intrinsic::eh_filter:{ 2115 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 2116 2117 if (MMI) { 2118 // Inform the MachineModuleInfo of the personality for this landing pad. 2119 ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(2)); 2120 assert(CE && CE->getOpcode() == Instruction::BitCast && 2121 isa<Function>(CE->getOperand(0)) && 2122 "Personality should be a function"); 2123 MMI->addPersonality(CurMBB, cast<Function>(CE->getOperand(0))); 2124 if (Intrinsic == Intrinsic::eh_filter) 2125 MMI->setIsFilterLandingPad(CurMBB); 2126 2127 // Gather all the type infos for this landing pad and pass them along to 2128 // MachineModuleInfo. 2129 std::vector<GlobalVariable *> TyInfo; 2130 for (unsigned i = 3, N = I.getNumOperands(); i < N; ++i) { 2131 ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i)); 2132 if (CE && CE->getOpcode() == Instruction::BitCast && 2133 isa<GlobalVariable>(CE->getOperand(0))) { 2134 TyInfo.push_back(cast<GlobalVariable>(CE->getOperand(0))); 2135 } else { 2136 ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i)); 2137 assert(CI && CI->getZExtValue() == 0 && 2138 "TypeInfo must be a global variable typeinfo or NULL"); 2139 TyInfo.push_back(NULL); 2140 } 2141 } 2142 MMI->addCatchTypeInfo(CurMBB, TyInfo); 2143 2144 // Mark exception selector register as live in. 2145 unsigned Reg = TLI.getExceptionSelectorRegister(); 2146 if (Reg) CurMBB->addLiveIn(Reg); 2147 2148 // Insert the EHSELECTION instruction. 2149 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other); 2150 SDOperand Ops[2]; 2151 Ops[0] = getValue(I.getOperand(1)); 2152 Ops[1] = getRoot(); 2153 SDOperand Op = DAG.getNode(ISD::EHSELECTION, VTs, Ops, 2); 2154 setValue(&I, Op); 2155 DAG.setRoot(Op.getValue(1)); 2156 } else { 2157 setValue(&I, DAG.getConstant(0, MVT::i32)); 2158 } 2159 2160 return 0; 2161 } 2162 2163 case Intrinsic::eh_typeid_for: { 2164 MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); 2165 2166 if (MMI) { 2167 // Find the type id for the given typeinfo. 
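      // As in the eh.selector case above, the operand is either a bitcast of
      // a GlobalVariable or a null (integer 0) typeinfo.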
2168 GlobalVariable *GV = NULL; 2169 ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(1)); 2170 if (CE && CE->getOpcode() == Instruction::BitCast && 2171 isa<GlobalVariable>(CE->getOperand(0))) { 2172 GV = cast<GlobalVariable>(CE->getOperand(0)); 2173 } else { 2174 ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1)); 2175 assert(CI && CI->getZExtValue() == 0 && 2176 "TypeInfo must be a global variable typeinfo or NULL"); 2177 GV = NULL; 2178 } 2179 2180 unsigned TypeID = MMI->getTypeIDFor(GV); 2181 setValue(&I, DAG.getConstant(TypeID, MVT::i32)); 2182 } else { 2183 setValue(&I, DAG.getConstant(0, MVT::i32)); 2184 } 2185 2186 return 0; 2187 } 2188 2189 case Intrinsic::sqrt_f32: 2190 case Intrinsic::sqrt_f64: 2191 setValue(&I, DAG.getNode(ISD::FSQRT, 2192 getValue(I.getOperand(1)).getValueType(), 2193 getValue(I.getOperand(1)))); 2194 return 0; 2195 case Intrinsic::powi_f32: 2196 case Intrinsic::powi_f64: 2197 setValue(&I, DAG.getNode(ISD::FPOWI, 2198 getValue(I.getOperand(1)).getValueType(), 2199 getValue(I.getOperand(1)), 2200 getValue(I.getOperand(2)))); 2201 return 0; 2202 case Intrinsic::pcmarker: { 2203 SDOperand Tmp = getValue(I.getOperand(1)); 2204 DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp)); 2205 return 0; 2206 } 2207 case Intrinsic::readcyclecounter: { 2208 SDOperand Op = getRoot(); 2209 SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER, 2210 DAG.getNodeValueTypes(MVT::i64, MVT::Other), 2, 2211 &Op, 1); 2212 setValue(&I, Tmp); 2213 DAG.setRoot(Tmp.getValue(1)); 2214 return 0; 2215 } 2216 case Intrinsic::bswap_i16: 2217 case Intrinsic::bswap_i32: 2218 case Intrinsic::bswap_i64: 2219 setValue(&I, DAG.getNode(ISD::BSWAP, 2220 getValue(I.getOperand(1)).getValueType(), 2221 getValue(I.getOperand(1)))); 2222 return 0; 2223 case Intrinsic::cttz_i8: 2224 case Intrinsic::cttz_i16: 2225 case Intrinsic::cttz_i32: 2226 case Intrinsic::cttz_i64: 2227 setValue(&I, DAG.getNode(ISD::CTTZ, 2228 getValue(I.getOperand(1)).getValueType(), 2229 getValue(I.getOperand(1)))); 2230 return 0; 2231 case Intrinsic::ctlz_i8: 2232 case Intrinsic::ctlz_i16: 2233 case Intrinsic::ctlz_i32: 2234 case Intrinsic::ctlz_i64: 2235 setValue(&I, DAG.getNode(ISD::CTLZ, 2236 getValue(I.getOperand(1)).getValueType(), 2237 getValue(I.getOperand(1)))); 2238 return 0; 2239 case Intrinsic::ctpop_i8: 2240 case Intrinsic::ctpop_i16: 2241 case Intrinsic::ctpop_i32: 2242 case Intrinsic::ctpop_i64: 2243 setValue(&I, DAG.getNode(ISD::CTPOP, 2244 getValue(I.getOperand(1)).getValueType(), 2245 getValue(I.getOperand(1)))); 2246 return 0; 2247 case Intrinsic::stacksave: { 2248 SDOperand Op = getRoot(); 2249 SDOperand Tmp = DAG.getNode(ISD::STACKSAVE, 2250 DAG.getNodeValueTypes(TLI.getPointerTy(), MVT::Other), 2, &Op, 1); 2251 setValue(&I, Tmp); 2252 DAG.setRoot(Tmp.getValue(1)); 2253 return 0; 2254 } 2255 case Intrinsic::stackrestore: { 2256 SDOperand Tmp = getValue(I.getOperand(1)); 2257 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp)); 2258 return 0; 2259 } 2260 case Intrinsic::prefetch: 2261 // FIXME: Currently discarding prefetches. 
2262 return 0; 2263 } 2264} 2265 2266 2267void SelectionDAGLowering::LowerCallTo(Instruction &I, 2268 const Type *CalledValueTy, 2269 unsigned CallingConv, 2270 bool IsTailCall, 2271 SDOperand Callee, unsigned OpIdx) { 2272 const PointerType *PT = cast<PointerType>(CalledValueTy); 2273 const FunctionType *FTy = cast<FunctionType>(PT->getElementType()); 2274 2275 TargetLowering::ArgListTy Args; 2276 TargetLowering::ArgListEntry Entry; 2277 Args.reserve(I.getNumOperands()); 2278 for (unsigned i = OpIdx, e = I.getNumOperands(); i != e; ++i) { 2279 Value *Arg = I.getOperand(i); 2280 SDOperand ArgNode = getValue(Arg); 2281 Entry.Node = ArgNode; Entry.Ty = Arg->getType(); 2282 Entry.isSExt = FTy->paramHasAttr(i, FunctionType::SExtAttribute); 2283 Entry.isZExt = FTy->paramHasAttr(i, FunctionType::ZExtAttribute); 2284 Entry.isInReg = FTy->paramHasAttr(i, FunctionType::InRegAttribute); 2285 Entry.isSRet = FTy->paramHasAttr(i, FunctionType::StructRetAttribute); 2286 Args.push_back(Entry); 2287 } 2288 2289 std::pair<SDOperand,SDOperand> Result = 2290 TLI.LowerCallTo(getRoot(), I.getType(), 2291 FTy->paramHasAttr(0,FunctionType::SExtAttribute), 2292 FTy->isVarArg(), CallingConv, IsTailCall, 2293 Callee, Args, DAG); 2294 if (I.getType() != Type::VoidTy) 2295 setValue(&I, Result.first); 2296 DAG.setRoot(Result.second); 2297} 2298 2299 2300void SelectionDAGLowering::visitCall(CallInst &I) { 2301 const char *RenameFn = 0; 2302 if (Function *F = I.getCalledFunction()) { 2303 if (F->isDeclaration()) 2304 if (unsigned IID = F->getIntrinsicID()) { 2305 RenameFn = visitIntrinsicCall(I, IID); 2306 if (!RenameFn) 2307 return; 2308 } else { // Not an LLVM intrinsic. 2309 const std::string &Name = F->getName(); 2310 if (Name[0] == 'c' && (Name == "copysign" || Name == "copysignf")) { 2311 if (I.getNumOperands() == 3 && // Basic sanity checks. 2312 I.getOperand(1)->getType()->isFloatingPoint() && 2313 I.getType() == I.getOperand(1)->getType() && 2314 I.getType() == I.getOperand(2)->getType()) { 2315 SDOperand LHS = getValue(I.getOperand(1)); 2316 SDOperand RHS = getValue(I.getOperand(2)); 2317 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(), 2318 LHS, RHS)); 2319 return; 2320 } 2321 } else if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) { 2322 if (I.getNumOperands() == 2 && // Basic sanity checks. 2323 I.getOperand(1)->getType()->isFloatingPoint() && 2324 I.getType() == I.getOperand(1)->getType()) { 2325 SDOperand Tmp = getValue(I.getOperand(1)); 2326 setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp)); 2327 return; 2328 } 2329 } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) { 2330 if (I.getNumOperands() == 2 && // Basic sanity checks. 2331 I.getOperand(1)->getType()->isFloatingPoint() && 2332 I.getType() == I.getOperand(1)->getType()) { 2333 SDOperand Tmp = getValue(I.getOperand(1)); 2334 setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp)); 2335 return; 2336 } 2337 } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) { 2338 if (I.getNumOperands() == 2 && // Basic sanity checks. 
2339 I.getOperand(1)->getType()->isFloatingPoint() && 2340 I.getType() == I.getOperand(1)->getType()) { 2341 SDOperand Tmp = getValue(I.getOperand(1)); 2342 setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp)); 2343 return; 2344 } 2345 } 2346 } 2347 } else if (isa<InlineAsm>(I.getOperand(0))) { 2348 visitInlineAsm(I); 2349 return; 2350 } 2351 2352 SDOperand Callee; 2353 if (!RenameFn) 2354 Callee = getValue(I.getOperand(0)); 2355 else 2356 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy()); 2357 2358 LowerCallTo(I, I.getCalledValue()->getType(), 2359 I.getCallingConv(), 2360 I.isTailCall(), 2361 Callee, 2362 1); 2363} 2364 2365 2366SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG, 2367 SDOperand &Chain, SDOperand &Flag)const{ 2368 SDOperand Val = DAG.getCopyFromReg(Chain, Regs[0], RegVT, Flag); 2369 Chain = Val.getValue(1); 2370 Flag = Val.getValue(2); 2371 2372 // If the result was expanded, copy from the top part. 2373 if (Regs.size() > 1) { 2374 assert(Regs.size() == 2 && 2375 "Cannot expand to more than 2 elts yet!"); 2376 SDOperand Hi = DAG.getCopyFromReg(Chain, Regs[1], RegVT, Flag); 2377 Chain = Hi.getValue(1); 2378 Flag = Hi.getValue(2); 2379 if (DAG.getTargetLoweringInfo().isLittleEndian()) 2380 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Val, Hi); 2381 else 2382 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Hi, Val); 2383 } 2384 2385 // Otherwise, if the return value was promoted or extended, truncate it to the 2386 // appropriate type. 2387 if (RegVT == ValueVT) 2388 return Val; 2389 2390 if (MVT::isInteger(RegVT)) { 2391 if (ValueVT < RegVT) 2392 return DAG.getNode(ISD::TRUNCATE, ValueVT, Val); 2393 else 2394 return DAG.getNode(ISD::ANY_EXTEND, ValueVT, Val); 2395 } else { 2396 return DAG.getNode(ISD::FP_ROUND, ValueVT, Val); 2397 } 2398} 2399 2400/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the 2401/// specified value into the registers specified by this object. This uses 2402/// Chain/Flag as the input and updates them for the output Chain/Flag. 2403void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG, 2404 SDOperand &Chain, SDOperand &Flag, 2405 MVT::ValueType PtrVT) const { 2406 if (Regs.size() == 1) { 2407 // If there is a single register and the types differ, this must be 2408 // a promotion. 2409 if (RegVT != ValueVT) { 2410 if (MVT::isInteger(RegVT)) { 2411 if (RegVT < ValueVT) 2412 Val = DAG.getNode(ISD::TRUNCATE, RegVT, Val); 2413 else 2414 Val = DAG.getNode(ISD::ANY_EXTEND, RegVT, Val); 2415 } else 2416 Val = DAG.getNode(ISD::FP_EXTEND, RegVT, Val); 2417 } 2418 Chain = DAG.getCopyToReg(Chain, Regs[0], Val, Flag); 2419 Flag = Chain.getValue(1); 2420 } else { 2421 std::vector<unsigned> R(Regs); 2422 if (!DAG.getTargetLoweringInfo().isLittleEndian()) 2423 std::reverse(R.begin(), R.end()); 2424 2425 for (unsigned i = 0, e = R.size(); i != e; ++i) { 2426 SDOperand Part = DAG.getNode(ISD::EXTRACT_ELEMENT, RegVT, Val, 2427 DAG.getConstant(i, PtrVT)); 2428 Chain = DAG.getCopyToReg(Chain, R[i], Part, Flag); 2429 Flag = Chain.getValue(1); 2430 } 2431 } 2432} 2433 2434/// AddInlineAsmOperands - Add this value to the specified inlineasm node 2435/// operand list. This adds the code marker and includes the number of 2436/// values added into it. 
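/// The low 3 bits of the marker word hold the operand code and the remaining
/// bits hold the register count, i.e. Code | (Regs.size() << 3).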
2437void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG, 2438 std::vector<SDOperand> &Ops) const { 2439 Ops.push_back(DAG.getConstant(Code | (Regs.size() << 3), MVT::i32)); 2440 for (unsigned i = 0, e = Regs.size(); i != e; ++i) 2441 Ops.push_back(DAG.getRegister(Regs[i], RegVT)); 2442} 2443 2444/// isAllocatableRegister - If the specified register is safe to allocate, 2445/// i.e. it isn't a stack pointer or some other special register, return the 2446/// register class for the register. Otherwise, return null. 2447static const TargetRegisterClass * 2448isAllocatableRegister(unsigned Reg, MachineFunction &MF, 2449 const TargetLowering &TLI, const MRegisterInfo *MRI) { 2450 MVT::ValueType FoundVT = MVT::Other; 2451 const TargetRegisterClass *FoundRC = 0; 2452 for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(), 2453 E = MRI->regclass_end(); RCI != E; ++RCI) { 2454 MVT::ValueType ThisVT = MVT::Other; 2455 2456 const TargetRegisterClass *RC = *RCI; 2457 // If none of the the value types for this register class are valid, we 2458 // can't use it. For example, 64-bit reg classes on 32-bit targets. 2459 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end(); 2460 I != E; ++I) { 2461 if (TLI.isTypeLegal(*I)) { 2462 // If we have already found this register in a different register class, 2463 // choose the one with the largest VT specified. For example, on 2464 // PowerPC, we favor f64 register classes over f32. 2465 if (FoundVT == MVT::Other || 2466 MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) { 2467 ThisVT = *I; 2468 break; 2469 } 2470 } 2471 } 2472 2473 if (ThisVT == MVT::Other) continue; 2474 2475 // NOTE: This isn't ideal. In particular, this might allocate the 2476 // frame pointer in functions that need it (due to them not being taken 2477 // out of allocation, because a variable sized allocation hasn't been seen 2478 // yet). This is a slight code pessimization, but should still work. 2479 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF), 2480 E = RC->allocation_order_end(MF); I != E; ++I) 2481 if (*I == Reg) { 2482 // We found a matching register class. Keep looking at others in case 2483 // we find one with larger registers that this physreg is also in. 2484 FoundRC = RC; 2485 FoundVT = ThisVT; 2486 break; 2487 } 2488 } 2489 return FoundRC; 2490} 2491 2492RegsForValue SelectionDAGLowering:: 2493GetRegistersForValue(const std::string &ConstrCode, 2494 MVT::ValueType VT, bool isOutReg, bool isInReg, 2495 std::set<unsigned> &OutputRegs, 2496 std::set<unsigned> &InputRegs) { 2497 std::pair<unsigned, const TargetRegisterClass*> PhysReg = 2498 TLI.getRegForInlineAsmConstraint(ConstrCode, VT); 2499 std::vector<unsigned> Regs; 2500 2501 unsigned NumRegs = VT != MVT::Other ? TLI.getNumElements(VT) : 1; 2502 MVT::ValueType RegVT; 2503 MVT::ValueType ValueVT = VT; 2504 2505 // If this is a constraint for a specific physical register, like {r17}, 2506 // assign it now. 2507 if (PhysReg.first) { 2508 if (VT == MVT::Other) 2509 ValueVT = *PhysReg.second->vt_begin(); 2510 2511 // Get the actual register value type. This is important, because the user 2512 // may have asked for (e.g.) the AX register in i32 type. We need to 2513 // remember that AX is actually i16 to get the right extension. 2514 RegVT = *PhysReg.second->vt_begin(); 2515 2516 // This is a explicit reference to a physical register. 2517 Regs.push_back(PhysReg.first); 2518 2519 // If this is an expanded reference, add the rest of the regs to Regs. 
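    // e.g. an i64 value constrained to a 32-bit register class needs two
    // consecutive registers from the class, starting at the one named above.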
    if (NumRegs != 1) {
      TargetRegisterClass::iterator I = PhysReg.second->begin();
      TargetRegisterClass::iterator E = PhysReg.second->end();
      for (; *I != PhysReg.first; ++I)
        assert(I != E && "Didn't find reg!");

      // Already added the first reg.
      --NumRegs; ++I;
      for (; NumRegs; --NumRegs, ++I) {
        assert(I != E && "Ran out of registers to allocate!");
        Regs.push_back(*I);
      }
    }
    return RegsForValue(Regs, RegVT, ValueVT);
  }

  // Otherwise, if this was a reference to an LLVM register class, create vregs
  // for this reference.
  std::vector<unsigned> RegClassRegs;
  if (PhysReg.second) {
    // If this is an early clobber or tied register, our regalloc doesn't know
    // how to maintain the constraint.  If it isn't, go ahead and create vregs
    // and let the regalloc do the right thing.
    if (!isOutReg || !isInReg) {
      if (VT == MVT::Other)
        ValueVT = *PhysReg.second->vt_begin();
      RegVT = *PhysReg.second->vt_begin();

      // Create the appropriate number of virtual registers.
      SSARegMap *RegMap = DAG.getMachineFunction().getSSARegMap();
      for (; NumRegs; --NumRegs)
        Regs.push_back(RegMap->createVirtualRegister(PhysReg.second));

      return RegsForValue(Regs, RegVT, ValueVT);
    }

    // Otherwise, we can't allocate it.  Let the code below figure out how to
    // maintain these constraints.
    RegClassRegs.assign(PhysReg.second->begin(), PhysReg.second->end());

  } else {
    // This is a reference to a register class that doesn't directly correspond
    // to an LLVM register class.  Allocate NumRegs consecutive, available,
    // registers from the class.
    RegClassRegs = TLI.getRegClassForInlineAsmConstraint(ConstrCode, VT);
  }

  const MRegisterInfo *MRI = DAG.getTarget().getRegisterInfo();
  MachineFunction &MF = *CurMBB->getParent();
  unsigned NumAllocated = 0;
  for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
    unsigned Reg = RegClassRegs[i];
    // See if this register is available.
    if ((isOutReg && OutputRegs.count(Reg)) ||   // Already used.
        (isInReg && InputRegs.count(Reg))) {     // Already used.
      // Make sure we find consecutive registers.
      NumAllocated = 0;
      continue;
    }

    // Check to see if this register is allocatable (i.e. don't give out the
    // stack pointer).
    const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, MRI);
    if (!RC) {
      // Make sure we find consecutive registers.
      NumAllocated = 0;
      continue;
    }

    // Okay, this register is good, we can use it.
    ++NumAllocated;

    // If we have allocated enough consecutive registers, we are done.
    if (NumAllocated == NumRegs) {
      unsigned RegStart = (i-NumAllocated)+1;
      unsigned RegEnd   = i+1;
      // Mark all of the allocated registers used.
      for (unsigned i = RegStart; i != RegEnd; ++i) {
        unsigned Reg = RegClassRegs[i];
        Regs.push_back(Reg);
        if (isOutReg) OutputRegs.insert(Reg);    // Mark reg used.
        if (isInReg)  InputRegs.insert(Reg);     // Mark reg used.
      }

      return RegsForValue(Regs, *RC->vt_begin(), VT);
    }
  }

  // Otherwise, we couldn't allocate enough registers for this.
  return RegsForValue();
}

/// getConstraintGenerality - Return an integer indicating how general CT is.
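/// Larger values are more general: memory constraints rank above register
/// class constraints, which rank above single-register constraints, so the
/// caller can simply keep the constraint code with the largest value.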
2613static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) { 2614 switch (CT) { 2615 default: assert(0 && "Unknown constraint type!"); 2616 case TargetLowering::C_Other: 2617 case TargetLowering::C_Unknown: 2618 return 0; 2619 case TargetLowering::C_Register: 2620 return 1; 2621 case TargetLowering::C_RegisterClass: 2622 return 2; 2623 case TargetLowering::C_Memory: 2624 return 3; 2625 } 2626} 2627 2628static std::string GetMostGeneralConstraint(std::vector<std::string> &C, 2629 const TargetLowering &TLI) { 2630 assert(!C.empty() && "Must have at least one constraint"); 2631 if (C.size() == 1) return C[0]; 2632 2633 std::string *Current = &C[0]; 2634 // If we have multiple constraints, try to pick the most general one ahead 2635 // of time. This isn't a wonderful solution, but handles common cases. 2636 TargetLowering::ConstraintType Flavor = TLI.getConstraintType(Current[0][0]); 2637 for (unsigned j = 1, e = C.size(); j != e; ++j) { 2638 TargetLowering::ConstraintType ThisFlavor = TLI.getConstraintType(C[j][0]); 2639 if (getConstraintGenerality(ThisFlavor) > 2640 getConstraintGenerality(Flavor)) { 2641 // This constraint letter is more general than the previous one, 2642 // use it. 2643 Flavor = ThisFlavor; 2644 Current = &C[j]; 2645 } 2646 } 2647 return *Current; 2648} 2649 2650 2651/// visitInlineAsm - Handle a call to an InlineAsm object. 2652/// 2653void SelectionDAGLowering::visitInlineAsm(CallInst &I) { 2654 InlineAsm *IA = cast<InlineAsm>(I.getOperand(0)); 2655 2656 SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), 2657 MVT::Other); 2658 2659 std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints(); 2660 std::vector<MVT::ValueType> ConstraintVTs; 2661 2662 /// AsmNodeOperands - A list of pairs. The first element is a register, the 2663 /// second is a bitfield where bit #0 is set if it is a use and bit #1 is set 2664 /// if it is a def of that register. 2665 std::vector<SDOperand> AsmNodeOperands; 2666 AsmNodeOperands.push_back(SDOperand()); // reserve space for input chain 2667 AsmNodeOperands.push_back(AsmStr); 2668 2669 SDOperand Chain = getRoot(); 2670 SDOperand Flag; 2671 2672 // We fully assign registers here at isel time. This is not optimal, but 2673 // should work. For register classes that correspond to LLVM classes, we 2674 // could let the LLVM RA do its thing, but we currently don't. Do a prepass 2675 // over the constraints, collecting fixed registers that we know we can't use. 2676 std::set<unsigned> OutputRegs, InputRegs; 2677 unsigned OpNum = 1; 2678 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) { 2679 std::string ConstraintCode = 2680 GetMostGeneralConstraint(Constraints[i].Codes, TLI); 2681 2682 MVT::ValueType OpVT; 2683 2684 // Compute the value type for each operand and add it to ConstraintVTs. 2685 switch (Constraints[i].Type) { 2686 case InlineAsm::isOutput: 2687 if (!Constraints[i].isIndirectOutput) { 2688 assert(I.getType() != Type::VoidTy && "Bad inline asm!"); 2689 OpVT = TLI.getValueType(I.getType()); 2690 } else { 2691 const Type *OpTy = I.getOperand(OpNum)->getType(); 2692 OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType()); 2693 OpNum++; // Consumes a call operand. 2694 } 2695 break; 2696 case InlineAsm::isInput: 2697 OpVT = TLI.getValueType(I.getOperand(OpNum)->getType()); 2698 OpNum++; // Consumes a call operand. 
2699 break; 2700 case InlineAsm::isClobber: 2701 OpVT = MVT::Other; 2702 break; 2703 } 2704 2705 ConstraintVTs.push_back(OpVT); 2706 2707 if (TLI.getRegForInlineAsmConstraint(ConstraintCode, OpVT).first == 0) 2708 continue; // Not assigned a fixed reg. 2709 2710 // Build a list of regs that this operand uses. This always has a single 2711 // element for promoted/expanded operands. 2712 RegsForValue Regs = GetRegistersForValue(ConstraintCode, OpVT, 2713 false, false, 2714 OutputRegs, InputRegs); 2715 2716 switch (Constraints[i].Type) { 2717 case InlineAsm::isOutput: 2718 // We can't assign any other output to this register. 2719 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2720 // If this is an early-clobber output, it cannot be assigned to the same 2721 // value as the input reg. 2722 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput) 2723 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2724 break; 2725 case InlineAsm::isInput: 2726 // We can't assign any other input to this register. 2727 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2728 break; 2729 case InlineAsm::isClobber: 2730 // Clobbered regs cannot be used as inputs or outputs. 2731 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2732 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2733 break; 2734 } 2735 } 2736 2737 // Loop over all of the inputs, copying the operand values into the 2738 // appropriate registers and processing the output regs. 2739 RegsForValue RetValRegs; 2740 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit; 2741 OpNum = 1; 2742 2743 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) { 2744 std::string ConstraintCode = 2745 GetMostGeneralConstraint(Constraints[i].Codes, TLI); 2746 2747 switch (Constraints[i].Type) { 2748 case InlineAsm::isOutput: { 2749 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass; 2750 if (ConstraintCode.size() == 1) // not a physreg name. 2751 CTy = TLI.getConstraintType(ConstraintCode[0]); 2752 2753 if (CTy == TargetLowering::C_Memory) { 2754 // Memory output. 2755 SDOperand InOperandVal = getValue(I.getOperand(OpNum)); 2756 2757 // Check that the operand (the address to store to) isn't a float. 2758 if (!MVT::isInteger(InOperandVal.getValueType())) 2759 assert(0 && "MATCH FAIL!"); 2760 2761 if (!Constraints[i].isIndirectOutput) 2762 assert(0 && "MATCH FAIL!"); 2763 2764 OpNum++; // Consumes a call operand. 2765 2766 // Extend/truncate to the right pointer type if needed. 2767 MVT::ValueType PtrType = TLI.getPointerTy(); 2768 if (InOperandVal.getValueType() < PtrType) 2769 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal); 2770 else if (InOperandVal.getValueType() > PtrType) 2771 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal); 2772 2773 // Add information to the INLINEASM node to know about this output. 2774 unsigned ResOpType = 4/*MEM*/ | (1 << 3); 2775 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32)); 2776 AsmNodeOperands.push_back(InOperandVal); 2777 break; 2778 } 2779 2780 // Otherwise, this is a register output. 2781 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!"); 2782 2783 // If this is an early-clobber output, or if there is an input 2784 // constraint that matches this, we need to reserve the input register 2785 // so no other inputs allocate to it. 
      bool UsesInputRegister = false;
      if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
        UsesInputRegister = true;

      // Copy the output from the appropriate register.  Find a register that
      // we can use.
      RegsForValue Regs =
        GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
                             true, UsesInputRegister,
                             OutputRegs, InputRegs);
      if (Regs.Regs.empty()) {
        cerr << "Couldn't allocate output reg for constraint '"
             << ConstraintCode << "'!\n";
        exit(1);
      }

      if (!Constraints[i].isIndirectOutput) {
        assert(RetValRegs.Regs.empty() &&
               "Cannot have multiple output constraints yet!");
        assert(I.getType() != Type::VoidTy && "Bad inline asm!");
        RetValRegs = Regs;
      } else {
        IndirectStoresToEmit.push_back(std::make_pair(Regs,
                                                      I.getOperand(OpNum)));
        OpNum++;  // Consumes a call operand.
      }

      // Add information to the INLINEASM node to know that this register is
      // set.
      Regs.AddInlineAsmOperands(2 /*REGDEF*/, DAG, AsmNodeOperands);
      break;
    }
    case InlineAsm::isInput: {
      SDOperand InOperandVal = getValue(I.getOperand(OpNum));
      OpNum++;  // Consumes a call operand.

      if (isdigit(ConstraintCode[0])) {    // Matching constraint?
        // If this is required to match an output register we have already set,
        // just use its register.
        unsigned OperandNo = atoi(ConstraintCode.c_str());

        // Scan until we find the definition we already emitted of this operand.
        // When we find it, create a RegsForValue operand.
        unsigned CurOp = 2;  // The first operand.
        for (; OperandNo; --OperandNo) {
          // Advance to the next operand.
          unsigned NumOps =
            cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
          assert(((NumOps & 7) == 2 /*REGDEF*/ ||
                  (NumOps & 7) == 4 /*MEM*/) &&
                 "Skipped past definitions?");
          CurOp += (NumOps>>3)+1;
        }

        unsigned NumOps =
          cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
        if ((NumOps & 7) == 2 /*REGDEF*/) {
          // Add NumOps>>3 registers to MatchedRegs.
          RegsForValue MatchedRegs;
          MatchedRegs.ValueVT = InOperandVal.getValueType();
          MatchedRegs.RegVT   = AsmNodeOperands[CurOp+1].getValueType();
          for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
            unsigned Reg =
              cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
            MatchedRegs.Regs.push_back(Reg);
          }

          // Use the produced MatchedRegs object to copy the value into the
          // matched registers.
          MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag,
                                    TLI.getPointerTy());
          MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
          break;
        } else {
          assert((NumOps & 7) == 4/*MEM*/ && "Unknown matching constraint!");
          assert(0 && "matching constraints for memory operands unimp");
        }
      }

      TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
      if (ConstraintCode.size() == 1)   // not a physreg name.
        CTy = TLI.getConstraintType(ConstraintCode[0]);

      if (CTy == TargetLowering::C_Other) {
        InOperandVal = TLI.isOperandValidForConstraint(InOperandVal,
                                                       ConstraintCode[0], DAG);
        if (!InOperandVal.Val) {
          cerr << "Invalid operand for inline asm constraint '"
               << ConstraintCode << "'!\n";
          exit(1);
        }

        // Add information to the INLINEASM node to know about this input.
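        // 3 in the low bits marks an immediate operand; the upper bits record
        // that a single value follows.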
2878 unsigned ResOpType = 3 /*IMM*/ | (1 << 3); 2879 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32)); 2880 AsmNodeOperands.push_back(InOperandVal); 2881 break; 2882 } else if (CTy == TargetLowering::C_Memory) { 2883 // Memory input. 2884 2885 // If the operand is a float, spill to a constant pool entry to get its 2886 // address. 2887 if (ConstantFP *Val = dyn_cast<ConstantFP>(I.getOperand(OpNum-1))) 2888 InOperandVal = DAG.getConstantPool(Val, TLI.getPointerTy()); 2889 2890 if (!MVT::isInteger(InOperandVal.getValueType())) { 2891 cerr << "Match failed, cannot handle this yet!\n"; 2892 InOperandVal.Val->dump(); 2893 exit(1); 2894 } 2895 2896 // Extend/truncate to the right pointer type if needed. 2897 MVT::ValueType PtrType = TLI.getPointerTy(); 2898 if (InOperandVal.getValueType() < PtrType) 2899 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal); 2900 else if (InOperandVal.getValueType() > PtrType) 2901 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal); 2902 2903 // Add information to the INLINEASM node to know about this input. 2904 unsigned ResOpType = 4/*MEM*/ | (1 << 3); 2905 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32)); 2906 AsmNodeOperands.push_back(InOperandVal); 2907 break; 2908 } 2909 2910 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!"); 2911 2912 // Copy the input into the appropriate registers. 2913 RegsForValue InRegs = 2914 GetRegistersForValue(ConstraintCode, ConstraintVTs[i], 2915 false, true, OutputRegs, InputRegs); 2916 // FIXME: should be match fail. 2917 assert(!InRegs.Regs.empty() && "Couldn't allocate input reg!"); 2918 2919 InRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag, TLI.getPointerTy()); 2920 2921 InRegs.AddInlineAsmOperands(1/*REGUSE*/, DAG, AsmNodeOperands); 2922 break; 2923 } 2924 case InlineAsm::isClobber: { 2925 RegsForValue ClobberedRegs = 2926 GetRegistersForValue(ConstraintCode, MVT::Other, false, false, 2927 OutputRegs, InputRegs); 2928 // Add the clobbered value to the operand list, so that the register 2929 // allocator is aware that the physreg got clobbered. 2930 if (!ClobberedRegs.Regs.empty()) 2931 ClobberedRegs.AddInlineAsmOperands(2/*REGDEF*/, DAG, AsmNodeOperands); 2932 break; 2933 } 2934 } 2935 } 2936 2937 // Finish up input operands. 2938 AsmNodeOperands[0] = Chain; 2939 if (Flag.Val) AsmNodeOperands.push_back(Flag); 2940 2941 Chain = DAG.getNode(ISD::INLINEASM, 2942 DAG.getNodeValueTypes(MVT::Other, MVT::Flag), 2, 2943 &AsmNodeOperands[0], AsmNodeOperands.size()); 2944 Flag = Chain.getValue(1); 2945 2946 // If this asm returns a register value, copy the result from that register 2947 // and set it as the value of the call. 2948 if (!RetValRegs.Regs.empty()) 2949 setValue(&I, RetValRegs.getCopyFromRegs(DAG, Chain, Flag)); 2950 2951 std::vector<std::pair<SDOperand, Value*> > StoresToEmit; 2952 2953 // Process indirect outputs, first output all of the flagged copies out of 2954 // physregs. 2955 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) { 2956 RegsForValue &OutRegs = IndirectStoresToEmit[i].first; 2957 Value *Ptr = IndirectStoresToEmit[i].second; 2958 SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, Flag); 2959 StoresToEmit.push_back(std::make_pair(OutVal, Ptr)); 2960 } 2961 2962 // Emit the non-flagged stores from the physregs. 
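  // If more than one store is needed, their chains are merged with a
  // TokenFactor so the stores are not artificially ordered.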
2963 SmallVector<SDOperand, 8> OutChains; 2964 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) 2965 OutChains.push_back(DAG.getStore(Chain, StoresToEmit[i].first, 2966 getValue(StoresToEmit[i].second), 2967 StoresToEmit[i].second, 0)); 2968 if (!OutChains.empty()) 2969 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 2970 &OutChains[0], OutChains.size()); 2971 DAG.setRoot(Chain); 2972} 2973 2974 2975void SelectionDAGLowering::visitMalloc(MallocInst &I) { 2976 SDOperand Src = getValue(I.getOperand(0)); 2977 2978 MVT::ValueType IntPtr = TLI.getPointerTy(); 2979 2980 if (IntPtr < Src.getValueType()) 2981 Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src); 2982 else if (IntPtr > Src.getValueType()) 2983 Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src); 2984 2985 // Scale the source by the type size. 2986 uint64_t ElementSize = TD->getTypeSize(I.getType()->getElementType()); 2987 Src = DAG.getNode(ISD::MUL, Src.getValueType(), 2988 Src, getIntPtrConstant(ElementSize)); 2989 2990 TargetLowering::ArgListTy Args; 2991 TargetLowering::ArgListEntry Entry; 2992 Entry.Node = Src; 2993 Entry.Ty = TLI.getTargetData()->getIntPtrType(); 2994 Args.push_back(Entry); 2995 2996 std::pair<SDOperand,SDOperand> Result = 2997 TLI.LowerCallTo(getRoot(), I.getType(), false, false, CallingConv::C, true, 2998 DAG.getExternalSymbol("malloc", IntPtr), 2999 Args, DAG); 3000 setValue(&I, Result.first); // Pointers always fit in registers 3001 DAG.setRoot(Result.second); 3002} 3003 3004void SelectionDAGLowering::visitFree(FreeInst &I) { 3005 TargetLowering::ArgListTy Args; 3006 TargetLowering::ArgListEntry Entry; 3007 Entry.Node = getValue(I.getOperand(0)); 3008 Entry.Ty = TLI.getTargetData()->getIntPtrType(); 3009 Args.push_back(Entry); 3010 MVT::ValueType IntPtr = TLI.getPointerTy(); 3011 std::pair<SDOperand,SDOperand> Result = 3012 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, CallingConv::C, true, 3013 DAG.getExternalSymbol("free", IntPtr), Args, DAG); 3014 DAG.setRoot(Result.second); 3015} 3016 3017// InsertAtEndOfBasicBlock - This method should be implemented by targets that 3018// mark instructions with the 'usesCustomDAGSchedInserter' flag. These 3019// instructions are special in various ways, which require special support to 3020// insert. The specified MachineInstr is created but not inserted into any 3021// basic blocks, and the scheduler passes ownership of it to this method. 
3022MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, 3023 MachineBasicBlock *MBB) { 3024 cerr << "If a target marks an instruction with " 3025 << "'usesCustomDAGSchedInserter', it must implement " 3026 << "TargetLowering::InsertAtEndOfBasicBlock!\n"; 3027 abort(); 3028 return 0; 3029} 3030 3031void SelectionDAGLowering::visitVAStart(CallInst &I) { 3032 DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(), 3033 getValue(I.getOperand(1)), 3034 DAG.getSrcValue(I.getOperand(1)))); 3035} 3036 3037void SelectionDAGLowering::visitVAArg(VAArgInst &I) { 3038 SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(), 3039 getValue(I.getOperand(0)), 3040 DAG.getSrcValue(I.getOperand(0))); 3041 setValue(&I, V); 3042 DAG.setRoot(V.getValue(1)); 3043} 3044 3045void SelectionDAGLowering::visitVAEnd(CallInst &I) { 3046 DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(), 3047 getValue(I.getOperand(1)), 3048 DAG.getSrcValue(I.getOperand(1)))); 3049} 3050 3051void SelectionDAGLowering::visitVACopy(CallInst &I) { 3052 DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(), 3053 getValue(I.getOperand(1)), 3054 getValue(I.getOperand(2)), 3055 DAG.getSrcValue(I.getOperand(1)), 3056 DAG.getSrcValue(I.getOperand(2)))); 3057} 3058 3059/// ExpandScalarFormalArgs - Recursively expand the formal_argument node, either 3060/// bit_convert it or join a pair of them with a BUILD_PAIR when appropriate. 3061static SDOperand ExpandScalarFormalArgs(MVT::ValueType VT, SDNode *Arg, 3062 unsigned &i, SelectionDAG &DAG, 3063 TargetLowering &TLI) { 3064 if (TLI.getTypeAction(VT) != TargetLowering::Expand) 3065 return SDOperand(Arg, i++); 3066 3067 MVT::ValueType EVT = TLI.getTypeToTransformTo(VT); 3068 unsigned NumVals = MVT::getSizeInBits(VT) / MVT::getSizeInBits(EVT); 3069 if (NumVals == 1) { 3070 return DAG.getNode(ISD::BIT_CONVERT, VT, 3071 ExpandScalarFormalArgs(EVT, Arg, i, DAG, TLI)); 3072 } else if (NumVals == 2) { 3073 SDOperand Lo = ExpandScalarFormalArgs(EVT, Arg, i, DAG, TLI); 3074 SDOperand Hi = ExpandScalarFormalArgs(EVT, Arg, i, DAG, TLI); 3075 if (!TLI.isLittleEndian()) 3076 std::swap(Lo, Hi); 3077 return DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi); 3078 } else { 3079 // Value scalarized into many values. Unimp for now. 3080 assert(0 && "Cannot expand i64 -> i16 yet!"); 3081 } 3082 return SDOperand(); 3083} 3084 3085/// TargetLowering::LowerArguments - This is the default LowerArguments 3086/// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all 3087/// targets are migrated to using FORMAL_ARGUMENTS, this hook should be 3088/// integrated into SDISel. 3089std::vector<SDOperand> 3090TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { 3091 const FunctionType *FTy = F.getFunctionType(); 3092 // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node. 3093 std::vector<SDOperand> Ops; 3094 Ops.push_back(DAG.getRoot()); 3095 Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy())); 3096 Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy())); 3097 3098 // Add one result value for each formal argument. 
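  // The node built below therefore has the shape
  //   FORMAL_ARGUMENTS Chain, CC#, isVarArg, Flags0, Flags1, ...
  // and produces one value per (possibly promoted or expanded) argument
  // piece, plus a chain result.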
3099 std::vector<MVT::ValueType> RetVals; 3100 unsigned j = 1; 3101 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); 3102 I != E; ++I, ++j) { 3103 MVT::ValueType VT = getValueType(I->getType()); 3104 unsigned Flags = ISD::ParamFlags::NoFlagSet; 3105 unsigned OriginalAlignment = 3106 getTargetData()->getABITypeAlignment(I->getType()); 3107 3108 // FIXME: Distinguish between a formal with no [sz]ext attribute from one 3109 // that is zero extended! 3110 if (FTy->paramHasAttr(j, FunctionType::ZExtAttribute)) 3111 Flags &= ~(ISD::ParamFlags::SExt); 3112 if (FTy->paramHasAttr(j, FunctionType::SExtAttribute)) 3113 Flags |= ISD::ParamFlags::SExt; 3114 if (FTy->paramHasAttr(j, FunctionType::InRegAttribute)) 3115 Flags |= ISD::ParamFlags::InReg; 3116 if (FTy->paramHasAttr(j, FunctionType::StructRetAttribute)) 3117 Flags |= ISD::ParamFlags::StructReturn; 3118 Flags |= (OriginalAlignment << ISD::ParamFlags::OrigAlignmentOffs); 3119 3120 switch (getTypeAction(VT)) { 3121 default: assert(0 && "Unknown type action!"); 3122 case Legal: 3123 RetVals.push_back(VT); 3124 Ops.push_back(DAG.getConstant(Flags, MVT::i32)); 3125 break; 3126 case Promote: 3127 RetVals.push_back(getTypeToTransformTo(VT)); 3128 Ops.push_back(DAG.getConstant(Flags, MVT::i32)); 3129 break; 3130 case Expand: 3131 if (VT != MVT::Vector) { 3132 // If this is a large integer, it needs to be broken up into small 3133 // integers. Figure out what the destination type is and how many small 3134 // integers it turns into. 3135 MVT::ValueType NVT = getTypeToExpandTo(VT); 3136 unsigned NumVals = getNumElements(VT); 3137 for (unsigned i = 0; i != NumVals; ++i) { 3138 RetVals.push_back(NVT); 3139 // if it isn't first piece, alignment must be 1 3140 if (i > 0) 3141 Flags = (Flags & (~ISD::ParamFlags::OrigAlignment)) | 3142 (1 << ISD::ParamFlags::OrigAlignmentOffs); 3143 Ops.push_back(DAG.getConstant(Flags, MVT::i32)); 3144 } 3145 } else { 3146 // Otherwise, this is a vector type. We only support legal vectors 3147 // right now. 3148 unsigned NumElems = cast<VectorType>(I->getType())->getNumElements(); 3149 const Type *EltTy = cast<VectorType>(I->getType())->getElementType(); 3150 3151 // Figure out if there is a Packed type corresponding to this Vector 3152 // type. If so, convert to the vector type. 3153 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 3154 if (TVT != MVT::Other && isTypeLegal(TVT)) { 3155 RetVals.push_back(TVT); 3156 Ops.push_back(DAG.getConstant(Flags, MVT::i32)); 3157 } else { 3158 assert(0 && "Don't support illegal by-val vector arguments yet!"); 3159 } 3160 } 3161 break; 3162 } 3163 } 3164 3165 RetVals.push_back(MVT::Other); 3166 3167 // Create the node. 3168 SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS, 3169 DAG.getNodeValueTypes(RetVals), RetVals.size(), 3170 &Ops[0], Ops.size()).Val; 3171 3172 DAG.setRoot(SDOperand(Result, Result->getNumValues()-1)); 3173 3174 // Set up the return result vector. 
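  // The loop below rebuilds one SDOperand per IR argument from the results of
  // the FORMAL_ARGUMENTS node, undoing any promotion (AssertSext/AssertZext
  // plus TRUNCATE, or FP_ROUND) or expansion that was applied above.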
3175 Ops.clear(); 3176 unsigned i = 0; 3177 unsigned Idx = 1; 3178 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; 3179 ++I, ++Idx) { 3180 MVT::ValueType VT = getValueType(I->getType()); 3181 3182 switch (getTypeAction(VT)) { 3183 default: assert(0 && "Unknown type action!"); 3184 case Legal: 3185 Ops.push_back(SDOperand(Result, i++)); 3186 break; 3187 case Promote: { 3188 SDOperand Op(Result, i++); 3189 if (MVT::isInteger(VT)) { 3190 if (FTy->paramHasAttr(Idx, FunctionType::SExtAttribute)) 3191 Op = DAG.getNode(ISD::AssertSext, Op.getValueType(), Op, 3192 DAG.getValueType(VT)); 3193 else if (FTy->paramHasAttr(Idx, FunctionType::ZExtAttribute)) 3194 Op = DAG.getNode(ISD::AssertZext, Op.getValueType(), Op, 3195 DAG.getValueType(VT)); 3196 Op = DAG.getNode(ISD::TRUNCATE, VT, Op); 3197 } else { 3198 assert(MVT::isFloatingPoint(VT) && "Not int or FP?"); 3199 Op = DAG.getNode(ISD::FP_ROUND, VT, Op); 3200 } 3201 Ops.push_back(Op); 3202 break; 3203 } 3204 case Expand: 3205 if (VT != MVT::Vector) { 3206 // If this is a large integer or a floating point node that needs to be 3207 // expanded, it needs to be reassembled from small integers. Figure out 3208 // what the source elt type is and how many small integers it is. 3209 Ops.push_back(ExpandScalarFormalArgs(VT, Result, i, DAG, *this)); 3210 } else { 3211 // Otherwise, this is a vector type. We only support legal vectors 3212 // right now. 3213 const VectorType *PTy = cast<VectorType>(I->getType()); 3214 unsigned NumElems = PTy->getNumElements(); 3215 const Type *EltTy = PTy->getElementType(); 3216 3217 // Figure out if there is a Packed type corresponding to this Vector 3218 // type. If so, convert to the vector type. 3219 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 3220 if (TVT != MVT::Other && isTypeLegal(TVT)) { 3221 SDOperand N = SDOperand(Result, i++); 3222 // Handle copies from generic vectors to registers. 3223 N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N, 3224 DAG.getConstant(NumElems, MVT::i32), 3225 DAG.getValueType(getValueType(EltTy))); 3226 Ops.push_back(N); 3227 } else { 3228 assert(0 && "Don't support illegal by-val vector arguments yet!"); 3229 abort(); 3230 } 3231 } 3232 break; 3233 } 3234 } 3235 return Ops; 3236} 3237 3238 3239/// ExpandScalarCallArgs - Recursively expand call argument node by 3240/// bit_converting it or extract a pair of elements from the larger node. 
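/// For example, an i64 argument on a 32-bit target is split into two i32
/// halves with EXTRACT_ELEMENT; the halves are passed low-then-high on
/// little-endian targets (high-then-low on big-endian ones), and every piece
/// after the first carries an original alignment of 1 in its flags word.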
3241static void ExpandScalarCallArgs(MVT::ValueType VT, SDOperand Arg, 3242 unsigned Flags, 3243 SmallVector<SDOperand, 32> &Ops, 3244 SelectionDAG &DAG, 3245 TargetLowering &TLI, 3246 bool isFirst = true) { 3247 3248 if (TLI.getTypeAction(VT) != TargetLowering::Expand) { 3249 // if it isn't first piece, alignment must be 1 3250 if (!isFirst) 3251 Flags = (Flags & (~ISD::ParamFlags::OrigAlignment)) | 3252 (1 << ISD::ParamFlags::OrigAlignmentOffs); 3253 Ops.push_back(Arg); 3254 Ops.push_back(DAG.getConstant(Flags, MVT::i32)); 3255 return; 3256 } 3257 3258 MVT::ValueType EVT = TLI.getTypeToTransformTo(VT); 3259 unsigned NumVals = MVT::getSizeInBits(VT) / MVT::getSizeInBits(EVT); 3260 if (NumVals == 1) { 3261 Arg = DAG.getNode(ISD::BIT_CONVERT, EVT, Arg); 3262 ExpandScalarCallArgs(EVT, Arg, Flags, Ops, DAG, TLI, isFirst); 3263 } else if (NumVals == 2) { 3264 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, EVT, Arg, 3265 DAG.getConstant(0, TLI.getPointerTy())); 3266 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, EVT, Arg, 3267 DAG.getConstant(1, TLI.getPointerTy())); 3268 if (!TLI.isLittleEndian()) 3269 std::swap(Lo, Hi); 3270 ExpandScalarCallArgs(EVT, Lo, Flags, Ops, DAG, TLI, isFirst); 3271 ExpandScalarCallArgs(EVT, Hi, Flags, Ops, DAG, TLI, false); 3272 } else { 3273 // Value scalarized into many values. Unimp for now. 3274 assert(0 && "Cannot expand i64 -> i16 yet!"); 3275 } 3276} 3277 3278/// TargetLowering::LowerCallTo - This is the default LowerCallTo 3279/// implementation, which just inserts an ISD::CALL node, which is later custom 3280/// lowered by the target to something concrete. FIXME: When all targets are 3281/// migrated to using ISD::CALL, this hook should be integrated into SDISel. 3282std::pair<SDOperand, SDOperand> 3283TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, 3284 bool RetTyIsSigned, bool isVarArg, 3285 unsigned CallingConv, bool isTailCall, 3286 SDOperand Callee, 3287 ArgListTy &Args, SelectionDAG &DAG) { 3288 SmallVector<SDOperand, 32> Ops; 3289 Ops.push_back(Chain); // Op#0 - Chain 3290 Ops.push_back(DAG.getConstant(CallingConv, getPointerTy())); // Op#1 - CC 3291 Ops.push_back(DAG.getConstant(isVarArg, getPointerTy())); // Op#2 - VarArg 3292 Ops.push_back(DAG.getConstant(isTailCall, getPointerTy())); // Op#3 - Tail 3293 Ops.push_back(Callee); 3294 3295 // Handle all of the outgoing arguments. 
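  // The operand list being built is:
  //   Chain, CC#, isVarArg, isTailCall, Callee, Arg0, Flags0, Arg1, Flags1, ...
  // where each flags word packs the sext/zext/inreg/sret bits together with
  // the original ABI alignment of the argument.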
3296 for (unsigned i = 0, e = Args.size(); i != e; ++i) { 3297 MVT::ValueType VT = getValueType(Args[i].Ty); 3298 SDOperand Op = Args[i].Node; 3299 unsigned Flags = ISD::ParamFlags::NoFlagSet; 3300 unsigned OriginalAlignment = 3301 getTargetData()->getABITypeAlignment(Args[i].Ty); 3302 3303 if (Args[i].isSExt) 3304 Flags |= ISD::ParamFlags::SExt; 3305 if (Args[i].isZExt) 3306 Flags |= ISD::ParamFlags::ZExt; 3307 if (Args[i].isInReg) 3308 Flags |= ISD::ParamFlags::InReg; 3309 if (Args[i].isSRet) 3310 Flags |= ISD::ParamFlags::StructReturn; 3311 Flags |= OriginalAlignment << ISD::ParamFlags::OrigAlignmentOffs; 3312 3313 switch (getTypeAction(VT)) { 3314 default: assert(0 && "Unknown type action!"); 3315 case Legal: 3316 Ops.push_back(Op); 3317 Ops.push_back(DAG.getConstant(Flags, MVT::i32)); 3318 break; 3319 case Promote: 3320 if (MVT::isInteger(VT)) { 3321 unsigned ExtOp; 3322 if (Args[i].isSExt) 3323 ExtOp = ISD::SIGN_EXTEND; 3324 else if (Args[i].isZExt) 3325 ExtOp = ISD::ZERO_EXTEND; 3326 else 3327 ExtOp = ISD::ANY_EXTEND; 3328 Op = DAG.getNode(ExtOp, getTypeToTransformTo(VT), Op); 3329 } else { 3330 assert(MVT::isFloatingPoint(VT) && "Not int or FP?"); 3331 Op = DAG.getNode(ISD::FP_EXTEND, getTypeToTransformTo(VT), Op); 3332 } 3333 Ops.push_back(Op); 3334 Ops.push_back(DAG.getConstant(Flags, MVT::i32)); 3335 break; 3336 case Expand: 3337 if (VT != MVT::Vector) { 3338 // If this is a large integer, it needs to be broken down into small 3339 // integers. Figure out what the source elt type is and how many small 3340 // integers it is. 3341 ExpandScalarCallArgs(VT, Op, Flags, Ops, DAG, *this); 3342 } else { 3343 // Otherwise, this is a vector type. We only support legal vectors 3344 // right now. 3345 const VectorType *PTy = cast<VectorType>(Args[i].Ty); 3346 unsigned NumElems = PTy->getNumElements(); 3347 const Type *EltTy = PTy->getElementType(); 3348 3349 // Figure out if there is a Packed type corresponding to this Vector 3350 // type. If so, convert to the vector type. 3351 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 3352 if (TVT != MVT::Other && isTypeLegal(TVT)) { 3353 // Insert a VBIT_CONVERT of the MVT::Vector type to the vector type. 3354 Op = DAG.getNode(ISD::VBIT_CONVERT, TVT, Op); 3355 Ops.push_back(Op); 3356 Ops.push_back(DAG.getConstant(Flags, MVT::i32)); 3357 } else { 3358 assert(0 && "Don't support illegal by-val vector call args yet!"); 3359 abort(); 3360 } 3361 } 3362 break; 3363 } 3364 } 3365 3366 // Figure out the result value types. 3367 SmallVector<MVT::ValueType, 4> RetTys; 3368 3369 if (RetTy != Type::VoidTy) { 3370 MVT::ValueType VT = getValueType(RetTy); 3371 switch (getTypeAction(VT)) { 3372 default: assert(0 && "Unknown type action!"); 3373 case Legal: 3374 RetTys.push_back(VT); 3375 break; 3376 case Promote: 3377 RetTys.push_back(getTypeToTransformTo(VT)); 3378 break; 3379 case Expand: 3380 if (VT != MVT::Vector) { 3381 // If this is a large integer, it needs to be reassembled from small 3382 // integers. Figure out what the source elt type is and how many small 3383 // integers it is. 3384 MVT::ValueType NVT = getTypeToExpandTo(VT); 3385 unsigned NumVals = getNumElements(VT); 3386 for (unsigned i = 0; i != NumVals; ++i) 3387 RetTys.push_back(NVT); 3388 } else { 3389 // Otherwise, this is a vector type. We only support legal vectors 3390 // right now. 
3391 const VectorType *PTy = cast<VectorType>(RetTy); 3392 unsigned NumElems = PTy->getNumElements(); 3393 const Type *EltTy = PTy->getElementType(); 3394 3395 // Figure out if there is a Packed type corresponding to this Vector 3396 // type. If so, convert to the vector type. 3397 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 3398 if (TVT != MVT::Other && isTypeLegal(TVT)) { 3399 RetTys.push_back(TVT); 3400 } else { 3401 assert(0 && "Don't support illegal by-val vector call results yet!"); 3402 abort(); 3403 } 3404 } 3405 } 3406 } 3407 3408 RetTys.push_back(MVT::Other); // Always has a chain. 3409 3410 // Finally, create the CALL node. 3411 SDOperand Res = DAG.getNode(ISD::CALL, 3412 DAG.getVTList(&RetTys[0], RetTys.size()), 3413 &Ops[0], Ops.size()); 3414 3415 // This returns a pair of operands. The first element is the 3416 // return value for the function (if RetTy is not VoidTy). The second 3417 // element is the outgoing token chain. 3418 SDOperand ResVal; 3419 if (RetTys.size() != 1) { 3420 MVT::ValueType VT = getValueType(RetTy); 3421 if (RetTys.size() == 2) { 3422 ResVal = Res; 3423 3424 // If this value was promoted, truncate it down. 3425 if (ResVal.getValueType() != VT) { 3426 if (VT == MVT::Vector) { 3427 // Insert a VBITCONVERT to convert from the packed result type to the 3428 // MVT::Vector type. 3429 unsigned NumElems = cast<VectorType>(RetTy)->getNumElements(); 3430 const Type *EltTy = cast<VectorType>(RetTy)->getElementType(); 3431 3432 // Figure out if there is a Packed type corresponding to this Vector 3433 // type. If so, convert to the vector type. 3434 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy),NumElems); 3435 if (TVT != MVT::Other && isTypeLegal(TVT)) { 3436 // Insert a VBIT_CONVERT of the FORMAL_ARGUMENTS to a 3437 // "N x PTyElementVT" MVT::Vector type. 3438 ResVal = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, ResVal, 3439 DAG.getConstant(NumElems, MVT::i32), 3440 DAG.getValueType(getValueType(EltTy))); 3441 } else { 3442 abort(); 3443 } 3444 } else if (MVT::isInteger(VT)) { 3445 unsigned AssertOp = ISD::AssertSext; 3446 if (!RetTyIsSigned) 3447 AssertOp = ISD::AssertZext; 3448 ResVal = DAG.getNode(AssertOp, ResVal.getValueType(), ResVal, 3449 DAG.getValueType(VT)); 3450 ResVal = DAG.getNode(ISD::TRUNCATE, VT, ResVal); 3451 } else { 3452 assert(MVT::isFloatingPoint(VT)); 3453 if (getTypeAction(VT) == Expand) 3454 ResVal = DAG.getNode(ISD::BIT_CONVERT, VT, ResVal); 3455 else 3456 ResVal = DAG.getNode(ISD::FP_ROUND, VT, ResVal); 3457 } 3458 } 3459 } else if (RetTys.size() == 3) { 3460 ResVal = DAG.getNode(ISD::BUILD_PAIR, VT, 3461 Res.getValue(0), Res.getValue(1)); 3462 3463 } else { 3464 assert(0 && "Case not handled yet!"); 3465 } 3466 } 3467 3468 return std::make_pair(ResVal, Res.getValue(Res.Val->getNumValues()-1)); 3469} 3470 3471SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 3472 assert(0 && "LowerOperation not implemented for this target!"); 3473 abort(); 3474 return SDOperand(); 3475} 3476 3477SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op, 3478 SelectionDAG &DAG) { 3479 assert(0 && "CustomPromoteOperation not implemented for this target!"); 3480 abort(); 3481 return SDOperand(); 3482} 3483 3484/// getMemsetValue - Vectorized representation of the memset value 3485/// operand. 
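/// For a constant byte the splat is computed directly, e.g. 0xAB becomes
/// 0xABAB and then 0xABABABAB for an i32 memset; for a non-constant value the
/// same replication is built out of SHL/OR nodes.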
3486 static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
3487                                 SelectionDAG &DAG) {
3488   MVT::ValueType CurVT = VT;
3489   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3490     uint64_t Val   = C->getValue() & 255;
3491     unsigned Shift = 8;
3492     while (CurVT != MVT::i8) {
3493       Val = (Val << Shift) | Val;
3494       Shift <<= 1;
3495       CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
3496     }
3497     return DAG.getConstant(Val, VT);
3498   } else {
3499     Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value);
3500     unsigned Shift = 8;
3501     while (CurVT != MVT::i8) {
3502       Value =
3503         DAG.getNode(ISD::OR, VT,
3504                     DAG.getNode(ISD::SHL, VT, Value,
3505                                 DAG.getConstant(Shift, MVT::i8)), Value);
3506       Shift <<= 1;
3507       CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
3508     }
3509
3510     return Value;
3511   }
3512 }
3513
3514 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
3515 /// used when a memcpy is turned into a memset because the source is a
3516 /// constant string pointer.
3517 static SDOperand getMemsetStringVal(MVT::ValueType VT,
3518                                     SelectionDAG &DAG, TargetLowering &TLI,
3519                                     std::string &Str, unsigned Offset) {
3520   uint64_t Val = 0;
3521   unsigned MSB = getSizeInBits(VT) / 8;
3522   if (TLI.isLittleEndian())
3523     Offset = Offset + MSB - 1;
3524   for (unsigned i = 0; i != MSB; ++i) {
3525     Val = (Val << 8) | (unsigned char)Str[Offset];
3526     Offset += TLI.isLittleEndian() ? -1 : 1;
3527   }
3528   return DAG.getConstant(Val, VT);
3529 }
3530
3531 /// getMemBasePlusOffset - Returns Base plus Offset as a node of Base's value type.
3532 static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset,
3533                                       SelectionDAG &DAG, TargetLowering &TLI) {
3534   MVT::ValueType VT = Base.getValueType();
3535   return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT));
3536 }
3537
3538 /// MeetsMaxMemopRequirement - Determines if the number of memory ops required
3539 /// to replace the memset / memcpy is below the threshold. It also returns the
3540 /// types of the sequence of memory ops to perform memset / memcpy.
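/// For example, copying 16 bytes with 4-byte alignment on a 32-bit target
/// that does not allow unaligned accesses would yield MemOps = {i32, i32,
/// i32, i32}, which is accepted as long as the limit is at least four.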
3541static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps, 3542 unsigned Limit, uint64_t Size, 3543 unsigned Align, TargetLowering &TLI) { 3544 MVT::ValueType VT; 3545 3546 if (TLI.allowsUnalignedMemoryAccesses()) { 3547 VT = MVT::i64; 3548 } else { 3549 switch (Align & 7) { 3550 case 0: 3551 VT = MVT::i64; 3552 break; 3553 case 4: 3554 VT = MVT::i32; 3555 break; 3556 case 2: 3557 VT = MVT::i16; 3558 break; 3559 default: 3560 VT = MVT::i8; 3561 break; 3562 } 3563 } 3564 3565 MVT::ValueType LVT = MVT::i64; 3566 while (!TLI.isTypeLegal(LVT)) 3567 LVT = (MVT::ValueType)((unsigned)LVT - 1); 3568 assert(MVT::isInteger(LVT)); 3569 3570 if (VT > LVT) 3571 VT = LVT; 3572 3573 unsigned NumMemOps = 0; 3574 while (Size != 0) { 3575 unsigned VTSize = getSizeInBits(VT) / 8; 3576 while (VTSize > Size) { 3577 VT = (MVT::ValueType)((unsigned)VT - 1); 3578 VTSize >>= 1; 3579 } 3580 assert(MVT::isInteger(VT)); 3581 3582 if (++NumMemOps > Limit) 3583 return false; 3584 MemOps.push_back(VT); 3585 Size -= VTSize; 3586 } 3587 3588 return true; 3589} 3590 3591void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) { 3592 SDOperand Op1 = getValue(I.getOperand(1)); 3593 SDOperand Op2 = getValue(I.getOperand(2)); 3594 SDOperand Op3 = getValue(I.getOperand(3)); 3595 SDOperand Op4 = getValue(I.getOperand(4)); 3596 unsigned Align = (unsigned)cast<ConstantSDNode>(Op4)->getValue(); 3597 if (Align == 0) Align = 1; 3598 3599 if (ConstantSDNode *Size = dyn_cast<ConstantSDNode>(Op3)) { 3600 std::vector<MVT::ValueType> MemOps; 3601 3602 // Expand memset / memcpy to a series of load / store ops 3603 // if the size operand falls below a certain threshold. 3604 SmallVector<SDOperand, 8> OutChains; 3605 switch (Op) { 3606 default: break; // Do nothing for now. 
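    // Both cases below fall back to the generic MEMSET/MEMCPY node emitted at
    // the bottom of this function when the size is not a constant or the
    // expansion would exceed the target's store limit.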
3607 case ISD::MEMSET: { 3608 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(), 3609 Size->getValue(), Align, TLI)) { 3610 unsigned NumMemOps = MemOps.size(); 3611 unsigned Offset = 0; 3612 for (unsigned i = 0; i < NumMemOps; i++) { 3613 MVT::ValueType VT = MemOps[i]; 3614 unsigned VTSize = getSizeInBits(VT) / 8; 3615 SDOperand Value = getMemsetValue(Op2, VT, DAG); 3616 SDOperand Store = DAG.getStore(getRoot(), Value, 3617 getMemBasePlusOffset(Op1, Offset, DAG, TLI), 3618 I.getOperand(1), Offset); 3619 OutChains.push_back(Store); 3620 Offset += VTSize; 3621 } 3622 } 3623 break; 3624 } 3625 case ISD::MEMCPY: { 3626 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemcpy(), 3627 Size->getValue(), Align, TLI)) { 3628 unsigned NumMemOps = MemOps.size(); 3629 unsigned SrcOff = 0, DstOff = 0, SrcDelta = 0; 3630 GlobalAddressSDNode *G = NULL; 3631 std::string Str; 3632 bool CopyFromStr = false; 3633 3634 if (Op2.getOpcode() == ISD::GlobalAddress) 3635 G = cast<GlobalAddressSDNode>(Op2); 3636 else if (Op2.getOpcode() == ISD::ADD && 3637 Op2.getOperand(0).getOpcode() == ISD::GlobalAddress && 3638 Op2.getOperand(1).getOpcode() == ISD::Constant) { 3639 G = cast<GlobalAddressSDNode>(Op2.getOperand(0)); 3640 SrcDelta = cast<ConstantSDNode>(Op2.getOperand(1))->getValue(); 3641 } 3642 if (G) { 3643 GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal()); 3644 if (GV && GV->isConstant()) { 3645 Str = GV->getStringValue(false); 3646 if (!Str.empty()) { 3647 CopyFromStr = true; 3648 SrcOff += SrcDelta; 3649 } 3650 } 3651 } 3652 3653 for (unsigned i = 0; i < NumMemOps; i++) { 3654 MVT::ValueType VT = MemOps[i]; 3655 unsigned VTSize = getSizeInBits(VT) / 8; 3656 SDOperand Value, Chain, Store; 3657 3658 if (CopyFromStr) { 3659 Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff); 3660 Chain = getRoot(); 3661 Store = 3662 DAG.getStore(Chain, Value, 3663 getMemBasePlusOffset(Op1, DstOff, DAG, TLI), 3664 I.getOperand(1), DstOff); 3665 } else { 3666 Value = DAG.getLoad(VT, getRoot(), 3667 getMemBasePlusOffset(Op2, SrcOff, DAG, TLI), 3668 I.getOperand(2), SrcOff); 3669 Chain = Value.getValue(1); 3670 Store = 3671 DAG.getStore(Chain, Value, 3672 getMemBasePlusOffset(Op1, DstOff, DAG, TLI), 3673 I.getOperand(1), DstOff); 3674 } 3675 OutChains.push_back(Store); 3676 SrcOff += VTSize; 3677 DstOff += VTSize; 3678 } 3679 } 3680 break; 3681 } 3682 } 3683 3684 if (!OutChains.empty()) { 3685 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, 3686 &OutChains[0], OutChains.size())); 3687 return; 3688 } 3689 } 3690 3691 DAG.setRoot(DAG.getNode(Op, MVT::Other, getRoot(), Op1, Op2, Op3, Op4)); 3692} 3693 3694//===----------------------------------------------------------------------===// 3695// SelectionDAGISel code 3696//===----------------------------------------------------------------------===// 3697 3698unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) { 3699 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT)); 3700} 3701 3702void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const { 3703 // FIXME: we only modify the CFG to split critical edges. This 3704 // updates dom and loop info. 3705 AU.addRequired<AliasAnalysis>(); 3706 AU.addRequired<LoopInfo>(); 3707 AU.setPreservesAll(); 3708} 3709 3710 3711/// OptimizeNoopCopyExpression - We have determined that the specified cast 3712/// instruction is a noop copy (e.g. it's casting from one pointer type to 3713/// another, int->uint, or int->sbyte on PPC. 3714/// 3715/// Return true if any changes are made. 
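/// For example, a pointer-to-pointer bitcast defined in one block but used in
/// several others would otherwise be selected as a cross-block virtual
/// register copy; duplicating the cast into each user block lets each use be
/// folded locally during per-block selection.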
3716static bool OptimizeNoopCopyExpression(CastInst *CI) { 3717 BasicBlock *DefBB = CI->getParent(); 3718 3719 /// InsertedCasts - Only insert a cast in each block once. 3720 std::map<BasicBlock*, CastInst*> InsertedCasts; 3721 3722 bool MadeChange = false; 3723 for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end(); 3724 UI != E; ) { 3725 Use &TheUse = UI.getUse(); 3726 Instruction *User = cast<Instruction>(*UI); 3727 3728 // Figure out which BB this cast is used in. For PHI's this is the 3729 // appropriate predecessor block. 3730 BasicBlock *UserBB = User->getParent(); 3731 if (PHINode *PN = dyn_cast<PHINode>(User)) { 3732 unsigned OpVal = UI.getOperandNo()/2; 3733 UserBB = PN->getIncomingBlock(OpVal); 3734 } 3735 3736 // Preincrement use iterator so we don't invalidate it. 3737 ++UI; 3738 3739 // If this user is in the same block as the cast, don't change the cast. 3740 if (UserBB == DefBB) continue; 3741 3742 // If we have already inserted a cast into this block, use it. 3743 CastInst *&InsertedCast = InsertedCasts[UserBB]; 3744 3745 if (!InsertedCast) { 3746 BasicBlock::iterator InsertPt = UserBB->begin(); 3747 while (isa<PHINode>(InsertPt)) ++InsertPt; 3748 3749 InsertedCast = 3750 CastInst::create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "", 3751 InsertPt); 3752 MadeChange = true; 3753 } 3754 3755 // Replace a use of the cast with a use of the new casat. 3756 TheUse = InsertedCast; 3757 } 3758 3759 // If we removed all uses, nuke the cast. 3760 if (CI->use_empty()) 3761 CI->eraseFromParent(); 3762 3763 return MadeChange; 3764} 3765 3766/// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset, 3767/// casting to the type of GEPI. 3768static Instruction *InsertGEPComputeCode(Instruction *&V, BasicBlock *BB, 3769 Instruction *GEPI, Value *Ptr, 3770 Value *PtrOffset) { 3771 if (V) return V; // Already computed. 3772 3773 // Figure out the insertion point 3774 BasicBlock::iterator InsertPt; 3775 if (BB == GEPI->getParent()) { 3776 // If GEP is already inserted into BB, insert right after the GEP. 3777 InsertPt = GEPI; 3778 ++InsertPt; 3779 } else { 3780 // Otherwise, insert at the top of BB, after any PHI nodes 3781 InsertPt = BB->begin(); 3782 while (isa<PHINode>(InsertPt)) ++InsertPt; 3783 } 3784 3785 // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into 3786 // BB so that there is only one value live across basic blocks (the cast 3787 // operand). 3788 if (CastInst *CI = dyn_cast<CastInst>(Ptr)) 3789 if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType())) 3790 Ptr = CastInst::create(CI->getOpcode(), CI->getOperand(0), CI->getType(), 3791 "", InsertPt); 3792 3793 // Add the offset, cast it to the right type. 3794 Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt); 3795 // Ptr is an integer type, GEPI is pointer type ==> IntToPtr 3796 return V = CastInst::create(Instruction::IntToPtr, Ptr, GEPI->getType(), 3797 "", InsertPt); 3798} 3799 3800/// ReplaceUsesOfGEPInst - Replace all uses of RepPtr with inserted code to 3801/// compute its value. The RepPtr value can be computed with Ptr+PtrOffset. One 3802/// trivial way of doing this would be to evaluate Ptr+PtrOffset in RepPtr's 3803/// block, then ReplaceAllUsesWith'ing everything. However, we would prefer to 3804/// sink PtrOffset into user blocks where doing so will likely allow us to fold 3805/// the constant add into a load or store instruction. Additionally, if a user 3806/// is a pointer-pointer cast, we look through it to find its users. 
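/// Concretely, for a load or store user the "add Ptr, PtrOffset" is emitted
/// in the user's own block (where it can fold into the addressing mode),
/// while all other users share one add emitted next to the original GEP.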
3807static void ReplaceUsesOfGEPInst(Instruction *RepPtr, Value *Ptr, 3808 Constant *PtrOffset, BasicBlock *DefBB, 3809 GetElementPtrInst *GEPI, 3810 std::map<BasicBlock*,Instruction*> &InsertedExprs) { 3811 while (!RepPtr->use_empty()) { 3812 Instruction *User = cast<Instruction>(RepPtr->use_back()); 3813 3814 // If the user is a Pointer-Pointer cast, recurse. Only BitCast can be 3815 // used for a Pointer-Pointer cast. 3816 if (isa<BitCastInst>(User)) { 3817 ReplaceUsesOfGEPInst(User, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs); 3818 3819 // Drop the use of RepPtr. The cast is dead. Don't delete it now, else we 3820 // could invalidate an iterator. 3821 User->setOperand(0, UndefValue::get(RepPtr->getType())); 3822 continue; 3823 } 3824 3825 // If this is a load of the pointer, or a store through the pointer, emit 3826 // the increment into the load/store block. 3827 Instruction *NewVal; 3828 if (isa<LoadInst>(User) || 3829 (isa<StoreInst>(User) && User->getOperand(0) != RepPtr)) { 3830 NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()], 3831 User->getParent(), GEPI, 3832 Ptr, PtrOffset); 3833 } else { 3834 // If this use is not foldable into the addressing mode, use a version 3835 // emitted in the GEP block. 3836 NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI, 3837 Ptr, PtrOffset); 3838 } 3839 3840 if (GEPI->getType() != RepPtr->getType()) { 3841 BasicBlock::iterator IP = NewVal; 3842 ++IP; 3843 // NewVal must be a GEP which must be pointer type, so BitCast 3844 NewVal = new BitCastInst(NewVal, RepPtr->getType(), "", IP); 3845 } 3846 User->replaceUsesOfWith(RepPtr, NewVal); 3847 } 3848} 3849 3850 3851/// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction 3852/// selection, we want to be a bit careful about some things. In particular, if 3853/// we have a GEP instruction that is used in a different block than it is 3854/// defined, the addressing expression of the GEP cannot be folded into loads or 3855/// stores that use it. In this case, decompose the GEP and move constant 3856/// indices into blocks that use it. 3857static bool OptimizeGEPExpression(GetElementPtrInst *GEPI, 3858 const TargetData *TD) { 3859 // If this GEP is only used inside the block it is defined in, there is no 3860 // need to rewrite it. 3861 bool isUsedOutsideDefBB = false; 3862 BasicBlock *DefBB = GEPI->getParent(); 3863 for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end(); 3864 UI != E; ++UI) { 3865 if (cast<Instruction>(*UI)->getParent() != DefBB) { 3866 isUsedOutsideDefBB = true; 3867 break; 3868 } 3869 } 3870 if (!isUsedOutsideDefBB) return false; 3871 3872 // If this GEP has no non-zero constant indices, there is nothing we can do, 3873 // ignore it. 3874 bool hasConstantIndex = false; 3875 bool hasVariableIndex = false; 3876 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1, 3877 E = GEPI->op_end(); OI != E; ++OI) { 3878 if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI)) { 3879 if (CI->getZExtValue()) { 3880 hasConstantIndex = true; 3881 break; 3882 } 3883 } else { 3884 hasVariableIndex = true; 3885 } 3886 } 3887 3888 // If this is a "GEP X, 0, 0, 0", turn this into a cast. 
3889 if (!hasConstantIndex && !hasVariableIndex) { 3890 /// The GEP operand must be a pointer, so must its result -> BitCast 3891 Value *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(), 3892 GEPI->getName(), GEPI); 3893 GEPI->replaceAllUsesWith(NC); 3894 GEPI->eraseFromParent(); 3895 return true; 3896 } 3897 3898 // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses. 3899 if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0))) 3900 return false; 3901 3902 // Otherwise, decompose the GEP instruction into multiplies and adds. Sum the 3903 // constant offset (which we now know is non-zero) and deal with it later. 3904 uint64_t ConstantOffset = 0; 3905 const Type *UIntPtrTy = TD->getIntPtrType(); 3906 Value *Ptr = new PtrToIntInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI); 3907 const Type *Ty = GEPI->getOperand(0)->getType(); 3908 3909 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1, 3910 E = GEPI->op_end(); OI != E; ++OI) { 3911 Value *Idx = *OI; 3912 if (const StructType *StTy = dyn_cast<StructType>(Ty)) { 3913 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue(); 3914 if (Field) 3915 ConstantOffset += TD->getStructLayout(StTy)->getElementOffset(Field); 3916 Ty = StTy->getElementType(Field); 3917 } else { 3918 Ty = cast<SequentialType>(Ty)->getElementType(); 3919 3920 // Handle constant subscripts. 3921 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) { 3922 if (CI->getZExtValue() == 0) continue; 3923 ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CI->getSExtValue(); 3924 continue; 3925 } 3926 3927 // Ptr = Ptr + Idx * ElementSize; 3928 3929 // Cast Idx to UIntPtrTy if needed. 3930 Idx = CastInst::createIntegerCast(Idx, UIntPtrTy, true/*SExt*/, "", GEPI); 3931 3932 uint64_t ElementSize = TD->getTypeSize(Ty); 3933 // Mask off bits that should not be set. 3934 ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits()); 3935 Constant *SizeCst = ConstantInt::get(UIntPtrTy, ElementSize); 3936 3937 // Multiply by the element size and add to the base. 3938 Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI); 3939 Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI); 3940 } 3941 } 3942 3943 // Make sure that the offset fits in uintptr_t. 3944 ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits()); 3945 Constant *PtrOffset = ConstantInt::get(UIntPtrTy, ConstantOffset); 3946 3947 // Okay, we have now emitted all of the variable index parts to the BB that 3948 // the GEP is defined in. Loop over all of the using instructions, inserting 3949 // an "add Ptr, ConstantOffset" into each block that uses it and update the 3950 // instruction to use the newly computed value, making GEPI dead. When the 3951 // user is a load or store instruction address, we emit the add into the user 3952 // block, otherwise we use a canonical version right next to the gep (these 3953 // won't be foldable as addresses, so we might as well share the computation). 3954 3955 std::map<BasicBlock*,Instruction*> InsertedExprs; 3956 ReplaceUsesOfGEPInst(GEPI, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs); 3957 3958 // Finally, the GEP is dead, remove it. 3959 GEPI->eraseFromParent(); 3960 3961 return true; 3962} 3963 3964/// isLoopInvariantInst - Returns true if all operands of the instruction are 3965/// loop invariants in the specified loop. 
3966static bool isLoopInvariantInst(Instruction *I, Loop *L) { 3967 // The instruction is loop invariant if all of its operands are loop-invariant 3968 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) 3969 if (!L->isLoopInvariant(I->getOperand(i))) 3970 return false; 3971 return true; 3972} 3973 3974/// SinkInvariantGEPIndex - If a GEP instruction has a variable index that has 3975/// been hoisted out of the loop by LICM pass, sink it back into the use BB 3976/// if it can be determined that the index computation can be folded into the 3977/// addressing mode of the load / store uses. 3978static bool SinkInvariantGEPIndex(BinaryOperator *BinOp, LoopInfo *loopInfo, 3979 const TargetLowering &TLI) { 3980 // Only look at Add / Sub for now. 3981 if (BinOp->getOpcode() != Instruction::Add && 3982 BinOp->getOpcode() != Instruction::Sub) 3983 return false; 3984 3985 // DestBBs - These are the blocks where a copy of BinOp will be inserted. 3986 SmallSet<BasicBlock*, 8> DestBBs; 3987 BasicBlock *DefBB = BinOp->getParent(); 3988 bool MadeChange = false; 3989 for (Value::use_iterator UI = BinOp->use_begin(), E = BinOp->use_end(); 3990 UI != E; ++UI) { 3991 Instruction *User = cast<Instruction>(*UI); 3992 // Only look for GEP use in another block. 3993 if (User->getParent() == DefBB) continue; 3994 3995 if (isa<GetElementPtrInst>(User)) { 3996 BasicBlock *UserBB = User->getParent(); 3997 Loop *L = loopInfo->getLoopFor(UserBB); 3998 3999 // Only sink if expression is a loop invariant in the use BB. 4000 if (L && isLoopInvariantInst(BinOp, L) && !User->use_empty()) { 4001 const Type *UseTy = NULL; 4002 // FIXME: We are assuming all the uses of the GEP will have the 4003 // same type. 4004 Instruction *GEPUser = cast<Instruction>(*User->use_begin()); 4005 if (LoadInst *Load = dyn_cast<LoadInst>(GEPUser)) 4006 UseTy = Load->getType(); 4007 else if (StoreInst *Store = dyn_cast<StoreInst>(GEPUser)) 4008 UseTy = Store->getOperand(0)->getType(); 4009 4010 // Check if it is possible to fold the expression to address mode. 4011 if (UseTy && 4012 TLI.isLegalAddressExpression(BinOp->getOpcode(), 4013 BinOp->getOperand(0), 4014 BinOp->getOperand(1), UseTy)) { 4015 DestBBs.insert(UserBB); 4016 MadeChange = true; 4017 } 4018 } 4019 } 4020 } 4021 4022 // Nothing to do. 4023 if (!MadeChange) 4024 return false; 4025 4026 /// InsertedOps - Only insert a duplicate in each block once. 4027 std::map<BasicBlock*, BinaryOperator*> InsertedOps; 4028 for (Value::use_iterator UI = BinOp->use_begin(), E = BinOp->use_end(); 4029 UI != E; ) { 4030 Instruction *User = cast<Instruction>(*UI); 4031 BasicBlock *UserBB = User->getParent(); 4032 4033 // Preincrement use iterator so we don't invalidate it. 4034 ++UI; 4035 4036 // If any user in this BB wants it, replace all the uses in the BB. 4037 if (DestBBs.count(UserBB)) { 4038 // Sink it into user block. 4039 BinaryOperator *&InsertedOp = InsertedOps[UserBB]; 4040 if (!InsertedOp) { 4041 BasicBlock::iterator InsertPt = UserBB->begin(); 4042 while (isa<PHINode>(InsertPt)) ++InsertPt; 4043 4044 InsertedOp = 4045 BinaryOperator::create(BinOp->getOpcode(), BinOp->getOperand(0), 4046 BinOp->getOperand(1), "", InsertPt); 4047 } 4048 4049 User->replaceUsesOfWith(BinOp, InsertedOp); 4050 } 4051 } 4052 4053 if (BinOp->use_empty()) 4054 BinOp->eraseFromParent(); 4055 4056 return true; 4057} 4058 4059 4060/// SplitEdgeNicely - Split the critical edge from TI to it's specified 4061/// successor if it will improve codegen. 
We only do this if the successor has
4062 /// phi nodes (otherwise critical edges are ok).  If there is already another
4063 /// predecessor of the succ that is empty (and thus has no phi nodes), use it
4064 /// instead of introducing a new block.
4065 static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum, Pass *P) {
4066   BasicBlock *TIBB = TI->getParent();
4067   BasicBlock *Dest = TI->getSuccessor(SuccNum);
4068   assert(isa<PHINode>(Dest->begin()) &&
4069          "This should only be called if Dest has a PHI!");
4070
4071   /// TIPHIValues - This array is lazily computed to determine the values of
4072   /// PHIs in Dest that TI would provide.
4073   std::vector<Value*> TIPHIValues;
4074
4075   // Check to see if Dest has any blocks that can be used as a split edge for
4076   // this terminator.
4077   for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) {
4078     BasicBlock *Pred = *PI;
4079     // To be usable, the pred has to end with an uncond branch to the dest.
4080     BranchInst *PredBr = dyn_cast<BranchInst>(Pred->getTerminator());
4081     if (!PredBr || !PredBr->isUnconditional() ||
4082         // Must be empty other than the branch.
4083         &Pred->front() != PredBr)
4084       continue;
4085
4086     // Finally, since we know that Dest has phi nodes in it, we have to make
4087     // sure that jumping to Pred will have the same effect as going to Dest in
4088     // terms of PHI values.
4089     PHINode *PN;
4090     unsigned PHINo = 0;
4091     bool FoundMatch = true;
4092     for (BasicBlock::iterator I = Dest->begin();
4093          (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo) {
4094       if (PHINo == TIPHIValues.size())
4095         TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB));
4096
4097       // If the PHI entry doesn't work, we can't use this pred.
4098       if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) {
4099         FoundMatch = false;
4100         break;
4101       }
4102     }
4103
4104     // If we found a workable predecessor, make TI branch to Pred instead.
4105     if (FoundMatch) {
4106       Dest->removePredecessor(TIBB);
4107       TI->setSuccessor(SuccNum, Pred);
4108       return;
4109     }
4110   }
4111
4112   SplitCriticalEdge(TI, SuccNum, P, true);
4113 }
4114
4115
4116 bool SelectionDAGISel::runOnFunction(Function &Fn) {
4117   MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
4118   RegMap = MF.getSSARegMap();
4119   DOUT << "\n\n\n=== " << Fn.getName() << "\n";
4120
4121   LoopInfo *loopInfo = &getAnalysis<LoopInfo>();
4122
4123   // First, split all critical edges.
4124   //
4125   // In this pass we also look for GEP and cast instructions that are used
4126   // across basic blocks and rewrite them to improve basic-block-at-a-time
4127   // selection.
4128   //
4129   bool MadeChange = true;
4130   while (MadeChange) {
4131     MadeChange = false;
4132     for (Function::iterator FNI = Fn.begin(), E = Fn.end(); FNI != E; ++FNI) {
4133       // Split all critical edges where the dest block has a PHI.
4134       TerminatorInst *BBTI = FNI->getTerminator();
4135       if (BBTI->getNumSuccessors() > 1) {
4136         for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i)
4137           if (isa<PHINode>(BBTI->getSuccessor(i)->begin()) &&
4138               isCriticalEdge(BBTI, i, true))
4139             SplitEdgeNicely(BBTI, i, this);
4140       }
4141
4142
4143       for (BasicBlock::iterator BBI = FNI->begin(), E = FNI->end(); BBI != E; ) {
4144         Instruction *I = BBI++;
4145
4146         if (CallInst *CI = dyn_cast<CallInst>(I)) {
4147           // If we found an inline asm expression, and if the target knows how to
4148           // lower it to normal LLVM code, do so now.
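          // Note that ExpandInlineAsm may rewrite or delete the call, which
          // invalidates our iterator; that is why scanning restarts at the
          // beginning of the block when the expansion succeeds.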
4149           if (isa<InlineAsm>(CI->getCalledValue()))
4150             if (const TargetAsmInfo *TAI =
4151                   TLI.getTargetMachine().getTargetAsmInfo()) {
4152               if (TAI->ExpandInlineAsm(CI))
4153                 BBI = FNI->begin();
4154             }
4155         } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
4156           MadeChange |= OptimizeGEPExpression(GEPI, TLI.getTargetData());
4157         } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
4158           // If the source of the cast is a constant, then this should have
4159           // already been constant folded.  The only reason NOT to constant fold
4160           // it is if something (e.g. LSR) was careful to place the constant
4161           // evaluation in a block other than the one that uses it (e.g. to hoist
4162           // the address of globals out of a loop).  If this is the case, we don't
4163           // want to forward-subst the cast.
4164           if (isa<Constant>(CI->getOperand(0)))
4165             continue;
4166
4167           // If this is a noop copy, sink it into user blocks to reduce the number
4168           // of virtual registers that must be created and coalesced.
4169           MVT::ValueType SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
4170           MVT::ValueType DstVT = TLI.getValueType(CI->getType());
4171
4172           // Is this an fp<->int conversion?
4173           if (MVT::isInteger(SrcVT) != MVT::isInteger(DstVT))
4174             continue;
4175
4176           // If this is an extension, it will be a zero or sign extension, which
4177           // isn't a noop.
4178           if (SrcVT < DstVT) continue;
4179
4180           // If these values will be promoted, find out what they will be promoted
4181           // to.  This helps us consider truncates on PPC as noop copies when they
4182           // are.
4183           if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
4184             SrcVT = TLI.getTypeToTransformTo(SrcVT);
4185           if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
4186             DstVT = TLI.getTypeToTransformTo(DstVT);
4187
4188           // If, after promotion, these are the same types, this is a noop copy.
4189           if (SrcVT == DstVT)
4190             MadeChange |= OptimizeNoopCopyExpression(CI);
4191         } else if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I)) {
4192           MadeChange |= SinkInvariantGEPIndex(BinOp, loopInfo, TLI);
4193         }
4194       }
4195     }
4196   }
4197
4198   FunctionLoweringInfo FuncInfo(TLI, Fn, MF);
4199
4200   for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
4201     SelectBasicBlock(I, MF, FuncInfo);
4202
4203   // Add function live-ins to entry block live-in set.
4204   BasicBlock *EntryBB = &Fn.getEntryBlock();
4205   BB = FuncInfo.MBBMap[EntryBB];
4206   if (!MF.livein_empty())
4207     for (MachineFunction::livein_iterator I = MF.livein_begin(),
4208          E = MF.livein_end(); I != E; ++I)
4209       BB->addLiveIn(I->first);
4210
4211   return true;
4212 }
4213
4214 SDOperand SelectionDAGLowering::CopyValueToVirtualRegister(Value *V,
4215                                                            unsigned Reg) {
4216   SDOperand Op = getValue(V);
4217   assert((Op.getOpcode() != ISD::CopyFromReg ||
4218           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
4219          "Copy from a reg to the same reg!");
4220
4221   // If this type is not legal, we must make sure to not create an invalid
4222   // register use.
4223   MVT::ValueType SrcVT = Op.getValueType();
4224   MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT);
4225   if (SrcVT == DestVT) {
4226     return DAG.getCopyToReg(getRoot(), Reg, Op);
4227   } else if (SrcVT == MVT::Vector) {
4228     // Handle copies from generic vectors to registers.
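    // The MVT::Vector value is first normalized with a VBIT_CONVERT, then each
    // of its NE elements is extracted, promoted or expanded as the target
    // requires, and copied into consecutive virtual registers starting at Reg.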
4229 MVT::ValueType PTyElementVT, PTyLegalElementVT; 4230 unsigned NE = TLI.getVectorTypeBreakdown(cast<VectorType>(V->getType()), 4231 PTyElementVT, PTyLegalElementVT); 4232 4233 // Insert a VBIT_CONVERT of the input vector to a "N x PTyElementVT" 4234 // MVT::Vector type. 4235 Op = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Op, 4236 DAG.getConstant(NE, MVT::i32), 4237 DAG.getValueType(PTyElementVT)); 4238 4239 // Loop over all of the elements of the resultant vector, 4240 // VEXTRACT_VECTOR_ELT'ing them, converting them to PTyLegalElementVT, then 4241 // copying them into output registers. 4242 SmallVector<SDOperand, 8> OutChains; 4243 SDOperand Root = getRoot(); 4244 for (unsigned i = 0; i != NE; ++i) { 4245 SDOperand Elt = DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, PTyElementVT, 4246 Op, DAG.getConstant(i, TLI.getPointerTy())); 4247 if (PTyElementVT == PTyLegalElementVT) { 4248 // Elements are legal. 4249 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt)); 4250 } else if (PTyLegalElementVT > PTyElementVT) { 4251 // Elements are promoted. 4252 if (MVT::isFloatingPoint(PTyLegalElementVT)) 4253 Elt = DAG.getNode(ISD::FP_EXTEND, PTyLegalElementVT, Elt); 4254 else 4255 Elt = DAG.getNode(ISD::ANY_EXTEND, PTyLegalElementVT, Elt); 4256 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt)); 4257 } else { 4258 // Elements are expanded. 4259 // The src value is expanded into multiple registers. 4260 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT, 4261 Elt, DAG.getConstant(0, TLI.getPointerTy())); 4262 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT, 4263 Elt, DAG.getConstant(1, TLI.getPointerTy())); 4264 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Lo)); 4265 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Hi)); 4266 } 4267 } 4268 return DAG.getNode(ISD::TokenFactor, MVT::Other, 4269 &OutChains[0], OutChains.size()); 4270 } else if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote) { 4271 // The src value is promoted to the register. 4272 if (MVT::isFloatingPoint(SrcVT)) 4273 Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op); 4274 else 4275 Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op); 4276 return DAG.getCopyToReg(getRoot(), Reg, Op); 4277 } else { 4278 DestVT = TLI.getTypeToExpandTo(SrcVT); 4279 unsigned NumVals = TLI.getNumElements(SrcVT); 4280 if (NumVals == 1) 4281 return DAG.getCopyToReg(getRoot(), Reg, 4282 DAG.getNode(ISD::BIT_CONVERT, DestVT, Op)); 4283 assert(NumVals == 2 && "1 to 4 (and more) expansion not implemented!"); 4284 // The src value is expanded into multiple registers. 4285 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT, 4286 Op, DAG.getConstant(0, TLI.getPointerTy())); 4287 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT, 4288 Op, DAG.getConstant(1, TLI.getPointerTy())); 4289 Op = DAG.getCopyToReg(getRoot(), Reg, Lo); 4290 return DAG.getCopyToReg(Op, Reg+1, Hi); 4291 } 4292} 4293 4294void SelectionDAGISel:: 4295LowerArguments(BasicBlock *LLVMBB, SelectionDAGLowering &SDL, 4296 std::vector<SDOperand> &UnorderedChains) { 4297 // If this is the entry block, emit arguments. 
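  // Arguments that are also used outside the entry block additionally get a
  // copy into the virtual register recorded for them in FuncInfo.ValueMap, so
  // that other blocks can refer to them.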
4298 Function &F = *LLVMBB->getParent(); 4299 FunctionLoweringInfo &FuncInfo = SDL.FuncInfo; 4300 SDOperand OldRoot = SDL.DAG.getRoot(); 4301 std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG); 4302 4303 unsigned a = 0; 4304 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end(); 4305 AI != E; ++AI, ++a) 4306 if (!AI->use_empty()) { 4307 SDL.setValue(AI, Args[a]); 4308 4309 // If this argument is live outside of the entry block, insert a copy from 4310 // whereever we got it to the vreg that other BB's will reference it as. 4311 DenseMap<const Value*, unsigned>::iterator VMI=FuncInfo.ValueMap.find(AI); 4312 if (VMI != FuncInfo.ValueMap.end()) { 4313 SDOperand Copy = SDL.CopyValueToVirtualRegister(AI, VMI->second); 4314 UnorderedChains.push_back(Copy); 4315 } 4316 } 4317 4318 // Finally, if the target has anything special to do, allow it to do so. 4319 // FIXME: this should insert code into the DAG! 4320 EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction()); 4321} 4322 4323void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB, 4324 std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate, 4325 FunctionLoweringInfo &FuncInfo) { 4326 SelectionDAGLowering SDL(DAG, TLI, FuncInfo); 4327 4328 std::vector<SDOperand> UnorderedChains; 4329 4330 // Lower any arguments needed in this block if this is the entry block. 4331 if (LLVMBB == &LLVMBB->getParent()->getEntryBlock()) 4332 LowerArguments(LLVMBB, SDL, UnorderedChains); 4333 4334 BB = FuncInfo.MBBMap[LLVMBB]; 4335 SDL.setCurrentBasicBlock(BB); 4336 4337 // Lower all of the non-terminator instructions. 4338 for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end(); 4339 I != E; ++I) 4340 SDL.visit(*I); 4341 4342 // Lower call part of invoke. 4343 InvokeInst *Invoke = dyn_cast<InvokeInst>(LLVMBB->getTerminator()); 4344 if (Invoke) SDL.visitInvoke(*Invoke, false); 4345 4346 // Ensure that all instructions which are used outside of their defining 4347 // blocks are available as virtual registers. 4348 for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I) 4349 if (!I->use_empty() && !isa<PHINode>(I)) { 4350 DenseMap<const Value*, unsigned>::iterator VMI =FuncInfo.ValueMap.find(I); 4351 if (VMI != FuncInfo.ValueMap.end()) 4352 UnorderedChains.push_back( 4353 SDL.CopyValueToVirtualRegister(I, VMI->second)); 4354 } 4355 4356 // Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to 4357 // ensure constants are generated when needed. Remember the virtual registers 4358 // that need to be added to the Machine PHI nodes as input. We cannot just 4359 // directly add them, because expansion might result in multiple MBB's for one 4360 // BB. As such, the start of the BB might correspond to a different MBB than 4361 // the end. 4362 // 4363 TerminatorInst *TI = LLVMBB->getTerminator(); 4364 4365 // Emit constants only once even if used by multiple PHI nodes. 4366 std::map<Constant*, unsigned> ConstantsOut; 4367 4368 // Vector bool would be better, but vector<bool> is really slow. 4369 std::vector<unsigned char> SuccsHandled; 4370 if (TI->getNumSuccessors()) 4371 SuccsHandled.resize(BB->getParent()->getNumBlockIDs()); 4372 4373 // Check successor nodes PHI nodes that expect a constant to be available from 4374 // this block. 
4375 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) { 4376 BasicBlock *SuccBB = TI->getSuccessor(succ); 4377 if (!isa<PHINode>(SuccBB->begin())) continue; 4378 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB]; 4379 4380 // If this terminator has multiple identical successors (common for 4381 // switches), only handle each succ once. 4382 unsigned SuccMBBNo = SuccMBB->getNumber(); 4383 if (SuccsHandled[SuccMBBNo]) continue; 4384 SuccsHandled[SuccMBBNo] = true; 4385 4386 MachineBasicBlock::iterator MBBI = SuccMBB->begin(); 4387 PHINode *PN; 4388 4389 // At this point we know that there is a 1-1 correspondence between LLVM PHI 4390 // nodes and Machine PHI nodes, but the incoming operands have not been 4391 // emitted yet. 4392 for (BasicBlock::iterator I = SuccBB->begin(); 4393 (PN = dyn_cast<PHINode>(I)); ++I) { 4394 // Ignore dead phi's. 4395 if (PN->use_empty()) continue; 4396 4397 unsigned Reg; 4398 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB); 4399 4400 if (Constant *C = dyn_cast<Constant>(PHIOp)) { 4401 unsigned &RegOut = ConstantsOut[C]; 4402 if (RegOut == 0) { 4403 RegOut = FuncInfo.CreateRegForValue(C); 4404 UnorderedChains.push_back( 4405 SDL.CopyValueToVirtualRegister(C, RegOut)); 4406 } 4407 Reg = RegOut; 4408 } else { 4409 Reg = FuncInfo.ValueMap[PHIOp]; 4410 if (Reg == 0) { 4411 assert(isa<AllocaInst>(PHIOp) && 4412 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) && 4413 "Didn't codegen value into a register!??"); 4414 Reg = FuncInfo.CreateRegForValue(PHIOp); 4415 UnorderedChains.push_back( 4416 SDL.CopyValueToVirtualRegister(PHIOp, Reg)); 4417 } 4418 } 4419 4420 // Remember that this register needs to added to the machine PHI node as 4421 // the input for this MBB. 4422 MVT::ValueType VT = TLI.getValueType(PN->getType()); 4423 unsigned NumElements; 4424 if (VT != MVT::Vector) 4425 NumElements = TLI.getNumElements(VT); 4426 else { 4427 MVT::ValueType VT1,VT2; 4428 NumElements = 4429 TLI.getVectorTypeBreakdown(cast<VectorType>(PN->getType()), 4430 VT1, VT2); 4431 } 4432 for (unsigned i = 0, e = NumElements; i != e; ++i) 4433 PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i)); 4434 } 4435 } 4436 ConstantsOut.clear(); 4437 4438 // Turn all of the unordered chains into one factored node. 4439 if (!UnorderedChains.empty()) { 4440 SDOperand Root = SDL.getRoot(); 4441 if (Root.getOpcode() != ISD::EntryToken) { 4442 unsigned i = 0, e = UnorderedChains.size(); 4443 for (; i != e; ++i) { 4444 assert(UnorderedChains[i].Val->getNumOperands() > 1); 4445 if (UnorderedChains[i].Val->getOperand(0) == Root) 4446 break; // Don't add the root if we already indirectly depend on it. 4447 } 4448 4449 if (i == e) 4450 UnorderedChains.push_back(Root); 4451 } 4452 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, 4453 &UnorderedChains[0], UnorderedChains.size())); 4454 } 4455 4456 // Lower the terminator after the copies are emitted. 4457 if (Invoke) { 4458 // Just the branch part of invoke. 4459 SDL.visitInvoke(*Invoke, true); 4460 } else { 4461 SDL.visit(*LLVMBB->getTerminator()); 4462 } 4463 4464 // Copy over any CaseBlock records that may now exist due to SwitchInst 4465 // lowering, as well as any jump table information. 4466 SwitchCases.clear(); 4467 SwitchCases = SDL.SwitchCases; 4468 JT = SDL.JT; 4469 4470 // Make sure the root of the DAG is up-to-date. 4471 DAG.setRoot(SDL.getRoot()); 4472} 4473 4474void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) { 4475 // Get alias analysis for load/store combining. 
4476 AliasAnalysis &AA = getAnalysis<AliasAnalysis>(); 4477 4478 // Run the DAG combiner in pre-legalize mode. 4479 DAG.Combine(false, AA); 4480 4481 DOUT << "Lowered selection DAG:\n"; 4482 DEBUG(DAG.dump()); 4483 4484 // Second step, hack on the DAG until it only uses operations and types that 4485 // the target supports. 4486 DAG.Legalize(); 4487 4488 DOUT << "Legalized selection DAG:\n"; 4489 DEBUG(DAG.dump()); 4490 4491 // Run the DAG combiner in post-legalize mode. 4492 DAG.Combine(true, AA); 4493 4494 if (ViewISelDAGs) DAG.viewGraph(); 4495 4496 // Third, instruction select all of the operations to machine code, adding the 4497 // code to the MachineBasicBlock. 4498 InstructionSelectBasicBlock(DAG); 4499 4500 DOUT << "Selected machine code:\n"; 4501 DEBUG(BB->dump()); 4502} 4503 4504void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF, 4505 FunctionLoweringInfo &FuncInfo) { 4506 std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate; 4507 { 4508 SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>()); 4509 CurDAG = &DAG; 4510 4511 // First step, lower LLVM code to some DAG. This DAG may use operations and 4512 // types that are not supported by the target. 4513 BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo); 4514 4515 // Second step, emit the lowered DAG as machine code. 4516 CodeGenAndEmitDAG(DAG); 4517 } 4518 4519 // Next, now that we know what the last MBB the LLVM BB expanded is, update 4520 // PHI nodes in successors. 4521 if (SwitchCases.empty() && JT.Reg == 0) { 4522 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) { 4523 MachineInstr *PHI = PHINodesToUpdate[i].first; 4524 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 4525 "This is not a machine PHI node that we are updating!"); 4526 PHI->addRegOperand(PHINodesToUpdate[i].second, false); 4527 PHI->addMachineBasicBlockOperand(BB); 4528 } 4529 return; 4530 } 4531 4532 // If the JumpTable record is filled in, then we need to emit a jump table. 4533 // Updating the PHI nodes is tricky in this case, since we need to determine 4534 // whether the PHI is a successor of the range check MBB or the jump table MBB 4535 if (JT.Reg) { 4536 assert(SwitchCases.empty() && "Cannot have jump table and lowered switch"); 4537 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>()); 4538 CurDAG = &SDAG; 4539 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo); 4540 MachineBasicBlock *RangeBB = BB; 4541 // Set the current basic block to the mbb we wish to insert the code into 4542 BB = JT.MBB; 4543 SDL.setCurrentBasicBlock(BB); 4544 // Emit the code 4545 SDL.visitJumpTable(JT); 4546 SDAG.setRoot(SDL.getRoot()); 4547 CodeGenAndEmitDAG(SDAG); 4548 // Update PHI Nodes 4549 for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) { 4550 MachineInstr *PHI = PHINodesToUpdate[pi].first; 4551 MachineBasicBlock *PHIBB = PHI->getParent(); 4552 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 4553 "This is not a machine PHI node that we are updating!"); 4554 if (PHIBB == JT.Default) { 4555 PHI->addRegOperand(PHINodesToUpdate[pi].second, false); 4556 PHI->addMachineBasicBlockOperand(RangeBB); 4557 } 4558 if (BB->succ_end() != std::find(BB->succ_begin(),BB->succ_end(), PHIBB)) { 4559 PHI->addRegOperand(PHINodesToUpdate[pi].second, false); 4560 PHI->addMachineBasicBlockOperand(BB); 4561 } 4562 } 4563 return; 4564 } 4565 4566 // If the switch block involved a branch to one of the actual successors, we 4567 // need to update PHI nodes in that block. 
void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
                                        FunctionLoweringInfo &FuncInfo) {
  std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
  {
    SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
    CurDAG = &DAG;

    // First step, lower LLVM code to some DAG.  This DAG may use operations
    // and types that are not supported by the target.
    BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);

    // Second step, emit the lowered DAG as machine code.
    CodeGenAndEmitDAG(DAG);
  }

  // Now that we know which MBB is the last one the LLVM BB expanded into,
  // update the PHI nodes in the successors.
  if (SwitchCases.empty() && JT.Reg == 0) {
    for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
      MachineInstr *PHI = PHINodesToUpdate[i].first;
      assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
             "This is not a machine PHI node that we are updating!");
      PHI->addRegOperand(PHINodesToUpdate[i].second, false);
      PHI->addMachineBasicBlockOperand(BB);
    }
    return;
  }

  // If the JumpTable record is filled in, then we need to emit a jump table.
  // Updating the PHI nodes is tricky in this case, since we need to determine
  // whether the PHI is a successor of the range check MBB or of the jump
  // table MBB.
  if (JT.Reg) {
    assert(SwitchCases.empty() && "Cannot have jump table and lowered switch");
    SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
    CurDAG = &SDAG;
    SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
    MachineBasicBlock *RangeBB = BB;
    // Set the current basic block to the mbb we wish to insert the code into.
    BB = JT.MBB;
    SDL.setCurrentBasicBlock(BB);
    // Emit the code.
    SDL.visitJumpTable(JT);
    SDAG.setRoot(SDL.getRoot());
    CodeGenAndEmitDAG(SDAG);
    // Update the PHI nodes.
    for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) {
      MachineInstr *PHI = PHINodesToUpdate[pi].first;
      MachineBasicBlock *PHIBB = PHI->getParent();
      assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
             "This is not a machine PHI node that we are updating!");
      if (PHIBB == JT.Default) {
        PHI->addRegOperand(PHINodesToUpdate[pi].second, false);
        PHI->addMachineBasicBlockOperand(RangeBB);
      }
      if (BB->succ_end() != std::find(BB->succ_begin(), BB->succ_end(), PHIBB)) {
        PHI->addRegOperand(PHINodesToUpdate[pi].second, false);
        PHI->addMachineBasicBlockOperand(BB);
      }
    }
    return;
  }

  // If the switch block involved a branch to one of the actual successors, we
  // need to update PHI nodes in that block.
  for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
    MachineInstr *PHI = PHINodesToUpdate[i].first;
    assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
           "This is not a machine PHI node that we are updating!");
    if (BB->isSuccessor(PHI->getParent())) {
      PHI->addRegOperand(PHINodesToUpdate[i].second, false);
      PHI->addMachineBasicBlockOperand(BB);
    }
  }

  // If we generated any switch lowering information, build and codegen any
  // additional DAGs necessary.
  for (unsigned i = 0, e = SwitchCases.size(); i != e; ++i) {
    SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
    CurDAG = &SDAG;
    SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);

    // Set the current basic block to the mbb we wish to insert the code into.
    BB = SwitchCases[i].ThisBB;
    SDL.setCurrentBasicBlock(BB);

    // Emit the code.
    SDL.visitSwitchCase(SwitchCases[i]);
    SDAG.setRoot(SDL.getRoot());
    CodeGenAndEmitDAG(SDAG);

    // Handle any PHI nodes in successors of this chunk, as if we were coming
    // from the original BB before switch expansion.  Note that PHI nodes can
    // occur multiple times in PHINodesToUpdate.  We have to be very careful to
    // handle them the right number of times.
    while ((BB = SwitchCases[i].TrueBB)) {  // Handle LHS and RHS.
      for (MachineBasicBlock::iterator Phi = BB->begin();
           Phi != BB->end() && Phi->getOpcode() == TargetInstrInfo::PHI; ++Phi) {
        // The value for this PHI node is recorded in PHINodesToUpdate; get it.
        for (unsigned pn = 0; ; ++pn) {
          assert(pn != PHINodesToUpdate.size() && "Didn't find PHI entry!");
          if (PHINodesToUpdate[pn].first == Phi) {
            Phi->addRegOperand(PHINodesToUpdate[pn].second, false);
            Phi->addMachineBasicBlockOperand(SwitchCases[i].ThisBB);
            break;
          }
        }
      }

      // Don't process RHS if same block as LHS.
      if (BB == SwitchCases[i].FalseBB)
        SwitchCases[i].FalseBB = 0;

      // If we haven't handled the RHS, do so now.  Otherwise, we're done.
      SwitchCases[i].TrueBB = SwitchCases[i].FalseBB;
      SwitchCases[i].FalseBB = 0;
    }
    assert(SwitchCases[i].TrueBB == 0 && SwitchCases[i].FalseBB == 0);
  }
}


//===----------------------------------------------------------------------===//
/// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
/// target node in the graph.
void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) {
  if (ViewSchedDAGs) DAG.viewGraph();

  RegisterScheduler::FunctionPassCtor Ctor = RegisterScheduler::getDefault();

  if (!Ctor) {
    Ctor = ISHeuristic;
    RegisterScheduler::setDefault(Ctor);
  }

  ScheduleDAG *SL = Ctor(this, &DAG, BB);
  BB = SL->Run();
  delete SL;
}


HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() {
  return new HazardRecognizer();
}

//===----------------------------------------------------------------------===//
// Helper functions used by the generated instruction selector.
//===----------------------------------------------------------------------===//
// Calls to these methods are generated by tblgen.

/// CheckAndMask - The isel is trying to match something like (and X, 255).  If
/// the dag combiner simplified the 255, we still want to match.  RHS is the
/// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
/// specified in the .td file (e.g. 255).
bool SelectionDAGISel::CheckAndMask(SDOperand LHS, ConstantSDNode *RHS,
                                    int64_t DesiredMaskS) {
  uint64_t ActualMask = RHS->getValue();
  uint64_t DesiredMask = DesiredMaskS & MVT::getIntVTBitMask(LHS.getValueType());

  // If the actual mask exactly matches, success!
  if (ActualMask == DesiredMask)
    return true;

  // If the actual AND mask is allowing unallowed bits, this doesn't match.
  if (ActualMask & ~DesiredMask)
    return false;

  // Otherwise, the DAG Combiner may have proven that the value coming in is
  // either already zero or is not demanded.  Check for known zero input bits.
  uint64_t NeededMask = DesiredMask & ~ActualMask;
  if (getTargetLowering().MaskedValueIsZero(LHS, NeededMask))
    return true;

  // TODO: check to see if missing bits are just not demanded.

  // Otherwise, this pattern doesn't match.
  return false;
}

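// Worked example for CheckAndMask: if a .td pattern asks for (and X, 255) but
// the combiner has already proven bits 7-4 of X to be zero and shrunk the
// immediate to 15, then ActualMask is 0x0F, DesiredMask is 0xFF, and
// NeededMask is 0xF0; MaskedValueIsZero(LHS, 0xF0) succeeds, so the pattern
// still matches.  CheckOrMask below performs the dual check, matching when
// the missing bits are already known to be one.
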
/// CheckOrMask - The isel is trying to match something like (or X, 255).  If
/// the dag combiner simplified the 255, we still want to match.  RHS is the
/// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
/// specified in the .td file (e.g. 255).
bool SelectionDAGISel::CheckOrMask(SDOperand LHS, ConstantSDNode *RHS,
                                   int64_t DesiredMaskS) {
  uint64_t ActualMask = RHS->getValue();
  uint64_t DesiredMask = DesiredMaskS & MVT::getIntVTBitMask(LHS.getValueType());

  // If the actual mask exactly matches, success!
  if (ActualMask == DesiredMask)
    return true;

  // If the actual OR mask is setting bits that are not allowed, this doesn't
  // match.
  if (ActualMask & ~DesiredMask)
    return false;

  // Otherwise, the DAG Combiner may have proven that the value coming in is
  // either already one or is not demanded.  Check for known one input bits.
  uint64_t NeededMask = DesiredMask & ~ActualMask;

  uint64_t KnownZero, KnownOne;
  getTargetLowering().ComputeMaskedBits(LHS, NeededMask, KnownZero, KnownOne);

  // If all the missing bits in the or are already known to be set, match!
  if ((NeededMask & KnownOne) == NeededMask)
    return true;

  // TODO: check to see if missing bits are just not demanded.

  // Otherwise, this pattern doesn't match.
  return false;
}


/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
/// by tblgen.  Others should not call it.
void SelectionDAGISel::
SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) {
  std::vector<SDOperand> InOps;
  std::swap(InOps, Ops);

  Ops.push_back(InOps[0]);  // input chain.
  Ops.push_back(InOps[1]);  // input asm string.

  unsigned i = 2, e = InOps.size();
  if (InOps[e-1].getValueType() == MVT::Flag)
    --e;  // Don't process a flag operand if it is here.

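  // Each operand group in InOps is introduced by an i32 flag word: the loop
  // below treats the low 3 bits as the operand kind (4 meaning a memory
  // operand) and the remaining bits as the number of SDOperands that follow
  // for that group.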
  while (i != e) {
    unsigned Flags = cast<ConstantSDNode>(InOps[i])->getValue();
    if ((Flags & 7) != 4 /*MEM*/) {
      // Just skip over this operand, copying the operands verbatim.
      Ops.insert(Ops.end(), InOps.begin()+i, InOps.begin()+i+(Flags >> 3) + 1);
      i += (Flags >> 3) + 1;
    } else {
      assert((Flags >> 3) == 1 && "Memory operand with multiple values?");
      // Otherwise, this is a memory operand.  Ask the target to select it.
      std::vector<SDOperand> SelOps;
      if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) {
        cerr << "Could not match memory address.  Inline asm failure!\n";
        exit(1);
      }

      // Add this to the output node.
      Ops.push_back(DAG.getTargetConstant(4/*MEM*/ | (SelOps.size() << 3),
                                          MVT::i32));
      Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
      i += 2;
    }
  }

  // Add the flag input back if present.
  if (e != InOps.size())
    Ops.push_back(InOps.back());
}