SelectionDAGISel.cpp revision 2452595927a19a84ab20f012f1e2414a18f65ffb
//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineDebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/CommandLine.h"  // cl::opt is used directly below.
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include <map>
#include <set>
#include <iostream>
#include <algorithm>
using namespace llvm;

#ifndef NDEBUG
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
          cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
          cl::desc("Pop up a window to show sched dags as they are processed"));
#else
static const bool ViewISelDAGs = 0, ViewSchedDAGs = 0;
#endif


//===---------------------------------------------------------------------===//
///
/// RegisterScheduler class - Track the registration of instruction schedulers.
///
//===---------------------------------------------------------------------===//
MachinePassRegistry RegisterScheduler::Registry;

//===---------------------------------------------------------------------===//
///
/// ISHeuristic command line option for instruction schedulers.
///
//===---------------------------------------------------------------------===//
namespace {
  cl::opt<RegisterScheduler::FunctionPassCtor, false,
          RegisterPassParser<RegisterScheduler> >
  ISHeuristic("sched",
              cl::init(&createDefaultScheduler),
              cl::desc("Instruction schedulers available:"));

  static RegisterScheduler
  defaultListDAGScheduler("default", "  Best scheduler for the target",
                          createDefaultScheduler);
} // namespace

namespace {
  /// RegsForValue - This struct represents the physical registers that a
  /// particular value is assigned and the type information about the value.
  /// This is needed because values can be promoted into larger registers or
  /// expanded into multiple registers that are smaller than the value.
  struct VISIBILITY_HIDDEN RegsForValue {
    /// Regs - This list holds the register (for legal and promoted values)
    /// or register set (for expanded values) that the value should be
    /// assigned to.
    std::vector<unsigned> Regs;

    /// RegVT - The value type of each register.
    ///
    MVT::ValueType RegVT;

    /// ValueVT - The value type of the LLVM value, which may be promoted from
    /// RegVT or made from merging the two expanded parts.
    MVT::ValueType ValueVT;

    RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {}

    RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt)
      : RegVT(regvt), ValueVT(valuevt) {
      Regs.push_back(Reg);
    }
    RegsForValue(const std::vector<unsigned> &regs,
                 MVT::ValueType regvt, MVT::ValueType valuevt)
      : Regs(regs), RegVT(regvt), ValueVT(valuevt) {
    }

    /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copy from
    /// this value and return the result as a ValueVT value.  This uses
    /// Chain/Flag as the input and updates them for the output Chain/Flag.
    SDOperand getCopyFromRegs(SelectionDAG &DAG,
                              SDOperand &Chain, SDOperand &Flag) const;

    /// getCopyToRegs - Emit a series of CopyToReg nodes that copy the
    /// specified value into the registers specified by this object.  This
    /// uses Chain/Flag as the input and updates them for the output
    /// Chain/Flag.
    void getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
                       SDOperand &Chain, SDOperand &Flag,
                       MVT::ValueType PtrVT) const;

    /// AddInlineAsmOperands - Add this value to the specified inline asm node
    /// operand list.  This adds the code marker and includes the number of
    /// values added into it.
    void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
                              std::vector<SDOperand> &Ops) const;
  };
}

namespace llvm {
  //===--------------------------------------------------------------------===//
  /// createDefaultScheduler - This creates an instruction scheduler
  /// appropriate for the target.
  ScheduleDAG* createDefaultScheduler(SelectionDAGISel *IS,
                                      SelectionDAG *DAG,
                                      MachineBasicBlock *BB) {
    TargetLowering &TLI = IS->getTargetLowering();

    if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency) {
      return createTDListDAGScheduler(IS, DAG, BB);
    } else {
      assert(TLI.getSchedulingPreference() ==
             TargetLowering::SchedulingForRegPressure &&
             "Unknown sched type!");
      return createBURRListDAGScheduler(IS, DAG, BB);
    }
  }


  //===--------------------------------------------------------------------===//
  /// FunctionLoweringInfo - This contains information that is global to a
  /// function that is used when lowering a region of the function.
  class FunctionLoweringInfo {
  public:
    TargetLowering &TLI;
    Function &Fn;
    MachineFunction &MF;
    SSARegMap *RegMap;

    FunctionLoweringInfo(TargetLowering &TLI, Function &Fn,
                         MachineFunction &MF);

    /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
    std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;

    /// ValueMap - Since we emit code for the function a basic block at a
    /// time, we must remember which virtual registers hold
    /// cross-basic-block values.
    std::map<const Value*, unsigned> ValueMap;

    /// StaticAllocaMap - Keep track of frame indices for fixed-size allocas
    /// in the entry block.  This allows the allocas to be efficiently
    /// referenced anywhere in the function.
    std::map<const AllocaInst*, int> StaticAllocaMap;

    unsigned MakeReg(MVT::ValueType VT) {
      return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
    }

    unsigned CreateRegForValue(const Value *V);

    unsigned InitializeRegForValue(const Value *V) {
      unsigned &R = ValueMap[V];
      assert(R == 0 && "Already initialized this value register!");
      return R = CreateRegForValue(V);
    }
  };
}

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) ||
        isa<SwitchInst>(*UI))
      return true;
  return false;
}

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true.  This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(Argument *A) {
  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
      return false;  // Use not in entry block.
  return true;
}

FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
                                           Function &fn, MachineFunction &mf)
  : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers.  This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::iterator BB = Fn.begin(), EB = Fn.end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
                   AI->getAlignment());

        // If the alignment of the value is smaller than the size of the
        // value, and if the size of the value is particularly small
        // (<= 8 bytes), round up to the size of the value for potentially
        // better performance.
        //
        // FIXME: This could be made better with a preferred alignment hook in
        // TargetData.  It serves primarily to 8-byte align doubles for X86.
        if (Align < TySize && TySize <= 8) Align = TySize;
        TySize *= CUI->getZExtValue();  // Get total allocated size.
        if (TySize == 0) TySize = 1;    // Don't create zero-sized stack objects.
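        // Reserving a permanent frame slot here means getValue() can later
        // refer to this alloca with a plain FrameIndex node instead of
        // materializing its address into a virtual register.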
        StaticAllocaMap[AI] =
          MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
      }

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = new MachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF.getBasicBlockList().push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    for (BasicBlock::iterator I = BB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        MVT::ValueType VT = TLI.getValueType(PN->getType());
        unsigned NumElements;
        if (VT != MVT::Vector)
          NumElements = TLI.getNumElements(VT);
        else {
          MVT::ValueType VT1, VT2;
          NumElements =
            TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
                                       VT1, VT2);
        }
        unsigned PHIReg = ValueMap[PN];
        assert(PHIReg && "PHI node does not have an assigned virtual register!");
        for (unsigned i = 0; i != NumElements; ++i)
          BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);
      }
  }
}

/// CreateRegForValue - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types.  Assign these registers
/// consecutive vreg numbers and return the first assigned number.
unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
  MVT::ValueType VT = TLI.getValueType(V->getType());

  // The number of register multiples we need to, e.g., split up
  // a <2 x i64> into 4 x i32 registers.
  unsigned NumVectorRegs = 1;

  // If this is a packed type, figure out what type it will decompose into
  // and how many of the elements it will use.
  if (VT == MVT::Vector) {
    const PackedType *PTy = cast<PackedType>(V->getType());
    unsigned NumElts = PTy->getNumElements();
    MVT::ValueType EltTy = TLI.getValueType(PTy->getElementType());

    // Divide the input until we get to a supported size.  This will always
    // end with a scalar if the target doesn't support vectors.
    while (NumElts > 1 && !TLI.isTypeLegal(getVectorType(EltTy, NumElts))) {
      NumElts >>= 1;
      NumVectorRegs <<= 1;
    }
    if (NumElts == 1)
      VT = EltTy;
    else
      VT = getVectorType(EltTy, NumElts);
  }

  // The common case is that we will only create one register for this
  // value.  If we have that case, create and return the virtual register.
  unsigned NV = TLI.getNumElements(VT);
  if (NV == 1) {
    // If we are promoting this value, pick the next largest supported type.
    MVT::ValueType PromotedType = TLI.getTypeToTransformTo(VT);
    unsigned Reg = MakeReg(PromotedType);
    // If this is a vector of supported or promoted types (e.g. 4 x i16),
    // create all of the registers.
    for (unsigned i = 1; i != NumVectorRegs; ++i)
      MakeReg(PromotedType);
    return Reg;
  }

  // If this value is represented with multiple target registers, make sure
  // to create enough consecutive registers of the right (smaller) type.
  unsigned NT = VT-1;  // Find the type to use.
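  // e.g. for an i64 value on a target whose largest legal integer type is
  // i32, NT settles on i32 below and NV == 2 consecutive i32 vregs are
  // created to hold the two halves.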
  while (TLI.getNumElements((MVT::ValueType)NT) != 1)
    --NT;

  unsigned R = MakeReg((MVT::ValueType)NT);
  for (unsigned i = 1; i != NV*NumVectorRegs; ++i)
    MakeReg((MVT::ValueType)NT);
  return R;
}

//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
/// Also, targets can overload any lowering method.
///
namespace llvm {
class SelectionDAGLowering {
  MachineBasicBlock *CurMBB;

  std::map<const Value*, SDOperand> NodeMap;

  /// PendingLoads - Loads are not emitted to the program immediately.  We
  /// bunch them up and then emit token factor nodes when possible.  This
  /// allows us to get simple disambiguation between loads without worrying
  /// about alias analysis.
  std::vector<SDOperand> PendingLoads;

  /// Case - A pair of values to record the Value for a switch case, and the
  /// case's target basic block.
  typedef std::pair<Constant*, MachineBasicBlock*> Case;
  typedef std::vector<Case>::iterator CaseItr;
  typedef std::pair<CaseItr, CaseItr> CaseRange;

  /// CaseRec - A struct with ctor used in lowering switches to a binary tree
  /// of conditional branches.
  struct CaseRec {
    CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) :
      CaseBB(bb), LT(lt), GE(ge), Range(r) {}

    /// CaseBB - The MBB in which to emit the compare and branch.
    MachineBasicBlock *CaseBB;
    /// LT, GE - If nonzero, we know the current case value must be less-than
    /// or greater-than-or-equal-to these Constants.
    Constant *LT;
    Constant *GE;
    /// Range - A pair of iterators representing the range of case values to
    /// be processed at this point in the binary search tree.
    CaseRange Range;
  };

  /// The comparison function for sorting Case values.
  struct CaseCmp {
    bool operator () (const Case& C1, const Case& C2) {
      if (const ConstantInt* I1 = dyn_cast<const ConstantInt>(C1.first))
        if (I1->getType()->isUnsigned())
          return I1->getZExtValue() <
                 cast<const ConstantInt>(C2.first)->getZExtValue();

      return cast<const ConstantInt>(C1.first)->getSExtValue() <
             cast<const ConstantInt>(C2.first)->getSExtValue();
    }
  };

public:
  // TLI - This is information that describes the available target features we
  // need for lowering.  This indicates when operations are unavailable,
  // implemented with a libcall, etc.
  TargetLowering &TLI;
  SelectionDAG &DAG;
  const TargetData *TD;

  /// SwitchCases - Vector of CaseBlock structures used to communicate
  /// SwitchInst code generation information.
  std::vector<SelectionDAGISel::CaseBlock> SwitchCases;
  SelectionDAGISel::JumpTable JT;

  /// FuncInfo - Information about the function as a whole.
  ///
  FunctionLoweringInfo &FuncInfo;

  SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
                       FunctionLoweringInfo &funcinfo)
    : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
      JT(0,0,0,0), FuncInfo(funcinfo) {
  }

  /// getRoot - Return the current virtual root of the Selection DAG.
  ///
  SDOperand getRoot() {
    if (PendingLoads.empty())
      return DAG.getRoot();

    if (PendingLoads.size() == 1) {
      SDOperand Root = PendingLoads[0];
      DAG.setRoot(Root);
      PendingLoads.clear();
      return Root;
    }

    // Otherwise, we have to make a token factor node.
    SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                                 &PendingLoads[0], PendingLoads.size());
    PendingLoads.clear();
    DAG.setRoot(Root);
    return Root;
  }

  void visit(Instruction &I) { visit(I.getOpcode(), I); }

  void visit(unsigned Opcode, User &I) {
    switch (Opcode) {
    default: assert(0 && "Unknown instruction type encountered!");
             abort();
      // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
    }
  }

  void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }

  SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr,
                        const Value *SV, SDOperand Root,
                        bool isVolatile);

  SDOperand getIntPtrConstant(uint64_t Val) {
    return DAG.getConstant(Val, TLI.getPointerTy());
  }

  SDOperand getValue(const Value *V);

  const SDOperand &setValue(const Value *V, SDOperand NewN) {
    SDOperand &N = NodeMap[V];
    assert(N.Val == 0 && "Already set a value for this node!");
    return N = NewN;
  }

  RegsForValue GetRegistersForValue(const std::string &ConstrCode,
                                    MVT::ValueType VT,
                                    bool OutReg, bool InReg,
                                    std::set<unsigned> &OutputRegs,
                                    std::set<unsigned> &InputRegs);

  // Terminator instructions.
  void visitRet(ReturnInst &I);
  void visitBr(BranchInst &I);
  void visitSwitch(SwitchInst &I);
  void visitUnreachable(UnreachableInst &I) { /* noop */ }

  // Helpers for visitSwitch.
  void visitSwitchCase(SelectionDAGISel::CaseBlock &CB);
  void visitJumpTable(SelectionDAGISel::JumpTable &JT);

  // These all get lowered before this pass.
  void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
  void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }

  void visitBinary(User &I, unsigned IntOp, unsigned FPOp, unsigned VecOp);
  void visitShift(User &I, unsigned Opcode);
  void visitAdd(User &I) {
    visitBinary(I, ISD::ADD, ISD::FADD, ISD::VADD);
  }
  void visitSub(User &I);
  void visitMul(User &I) {
    visitBinary(I, ISD::MUL, ISD::FMUL, ISD::VMUL);
  }
  void visitDiv(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I,
                Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV,
                Ty->isSigned() ? ISD::VSDIV : ISD::VUDIV);
  }
  void visitRem(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, ISD::FREM, 0);
  }
  void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, ISD::VAND); }
  void visitOr (User &I) { visitBinary(I, ISD::OR,  0, ISD::VOR); }
  void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, ISD::VXOR); }
  void visitShl(User &I) { visitShift(I, ISD::SHL); }
  void visitShr(User &I) {
    visitShift(I, I.getType()->isUnsigned() ?
                              ISD::SRL : ISD::SRA);
  }

  void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc,
                  ISD::CondCode FPOpc);
  void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ,
                                        ISD::SETOEQ); }
  void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE,
                                        ISD::SETUNE); }
  void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE,
                                        ISD::SETOLE); }
  void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE,
                                        ISD::SETOGE); }
  void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT,
                                        ISD::SETOLT); }
  void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT,
                                        ISD::SETOGT); }

  void visitExtractElement(User &I);
  void visitInsertElement(User &I);
  void visitShuffleVector(User &I);

  void visitGetElementPtr(User &I);
  void visitCast(User &I);
  void visitSelect(User &I);

  void visitMalloc(MallocInst &I);
  void visitFree(FreeInst &I);
  void visitAlloca(AllocaInst &I);
  void visitLoad(LoadInst &I);
  void visitStore(StoreInst &I);
  void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
  void visitCall(CallInst &I);
  void visitInlineAsm(CallInst &I);
  const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic);

  void visitVAStart(CallInst &I);
  void visitVAArg(VAArgInst &I);
  void visitVAEnd(CallInst &I);
  void visitVACopy(CallInst &I);
  void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);

  void visitMemIntrinsic(CallInst &I, unsigned Op);

  void visitUserOp1(Instruction &I) {
    assert(0 && "UserOp1 should not exist at instruction selection time!");
    abort();
  }
  void visitUserOp2(Instruction &I) {
    assert(0 && "UserOp2 should not exist at instruction selection time!");
    abort();
  }
};
} // end namespace llvm

SDOperand SelectionDAGLowering::getValue(const Value *V) {
  SDOperand &N = NodeMap[V];
  if (N.Val) return N;

  const Type *VTy = V->getType();
  MVT::ValueType VT = TLI.getValueType(VTy);
  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      assert(N.Val && "visit didn't populate the ValueMap!");
      return N;
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
      return N = DAG.getGlobalAddress(GV, VT);
    } else if (isa<ConstantPointerNull>(C)) {
      return N = DAG.getConstant(0, TLI.getPointerTy());
    } else if (isa<UndefValue>(C)) {
      if (!isa<PackedType>(VTy))
        return N = DAG.getNode(ISD::UNDEF, VT);

      // Create a VBUILD_VECTOR of undef nodes.
      const PackedType *PTy = cast<PackedType>(VTy);
      unsigned NumElements = PTy->getNumElements();
      MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());

      SmallVector<SDOperand, 8> Ops;
      Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT));

      // Create a VBUILD_VECTOR node with generic Vector type.
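      // Generic MVT::Vector nodes carry their shape explicitly: the last two
      // operands are always the element count (an i32 constant) followed by
      // the element value type.  Code throughout this file reads these back
      // with op_end()-2 and op_end()-1.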
      Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
      Ops.push_back(DAG.getValueType(PVT));
      return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector,
                             &Ops[0], Ops.size());
    } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
      return N = DAG.getConstantFP(CFP->getValue(), VT);
    } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
      unsigned NumElements = PTy->getNumElements();
      MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());

      // Now that we know the number and type of the elements, push a
      // Constant or ConstantFP node onto the ops list for each element of
      // the packed constant.
      SmallVector<SDOperand, 8> Ops;
      if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
        for (unsigned i = 0; i != NumElements; ++i)
          Ops.push_back(getValue(CP->getOperand(i)));
      } else {
        assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
        SDOperand Op;
        if (MVT::isFloatingPoint(PVT))
          Op = DAG.getConstantFP(0, PVT);
        else
          Op = DAG.getConstant(0, PVT);
        Ops.assign(NumElements, Op);
      }

      // Create a VBUILD_VECTOR node with generic Vector type.
      Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
      Ops.push_back(DAG.getValueType(PVT));
      return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector,
                             &Ops[0], Ops.size());
    } else {
      // Canonicalize all constant ints to be unsigned.
      return N = DAG.getConstant(cast<ConstantIntegral>(C)->getZExtValue(), VT);
    }
  }

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    std::map<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
  }

  std::map<const Value*, unsigned>::const_iterator VMI =
    FuncInfo.ValueMap.find(V);
  assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");

  unsigned InReg = VMI->second;

  // If this type is not legal, make it so now.
  if (VT != MVT::Vector) {
    MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);

    N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
    if (DestVT < VT) {
      // Source must be expanded.  This input value is actually coming from
      // the register pair VMI->second and VMI->second+1.
      N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
                      DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
    } else if (DestVT > VT) { // Promotion case
      if (MVT::isFloatingPoint(VT))
        N = DAG.getNode(ISD::FP_ROUND, VT, N);
      else
        N = DAG.getNode(ISD::TRUNCATE, VT, N);
    }
  } else {
    // Otherwise, if this is a vector, make it available as a generic vector
    // here.
    MVT::ValueType PTyElementVT, PTyLegalElementVT;
    const PackedType *PTy = cast<PackedType>(VTy);
    unsigned NE = TLI.getPackedTypeBreakdown(PTy, PTyElementVT,
                                             PTyLegalElementVT);

    // Build a VBUILD_VECTOR with the input registers.
    SmallVector<SDOperand, 8> Ops;
    if (PTyElementVT == PTyLegalElementVT) {
      // If the value types are legal, just VBUILD the CopyFromReg nodes.
      for (unsigned i = 0; i != NE; ++i)
        Ops.push_back(DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                         PTyElementVT));
    } else if (PTyElementVT < PTyLegalElementVT) {
      // If the register was promoted, use TRUNCATE or FP_ROUND as appropriate.
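      // Each CopyFromReg produces the wider promoted register type; the
      // narrowing node recovers the original element type so the resulting
      // VBUILD_VECTOR has uniformly typed elements.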
      for (unsigned i = 0; i != NE; ++i) {
        SDOperand Op = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                          PTyElementVT);
        if (MVT::isFloatingPoint(PTyElementVT))
          Op = DAG.getNode(ISD::FP_ROUND, PTyElementVT, Op);
        else
          Op = DAG.getNode(ISD::TRUNCATE, PTyElementVT, Op);
        Ops.push_back(Op);
      }
    } else {
      // If the register was expanded, use BUILD_PAIR.
      assert((NE & 1) == 0 && "Must expand into a multiple of 2 elements!");
      for (unsigned i = 0; i != NE/2; ++i) {
        SDOperand Op0 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                           PTyElementVT);
        SDOperand Op1 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                           PTyElementVT);
        Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Op0, Op1));
      }
    }

    Ops.push_back(DAG.getConstant(NE, MVT::i32));
    Ops.push_back(DAG.getValueType(PTyLegalElementVT));
    N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, &Ops[0], Ops.size());

    // Finally, use a VBIT_CONVERT to make this available as the appropriate
    // vector type.
    N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
                    DAG.getConstant(PTy->getNumElements(),
                                    MVT::i32),
                    DAG.getValueType(TLI.getValueType(PTy->getElementType())));
  }

  return N;
}


void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
    return;
  }
  SmallVector<SDOperand, 8> NewValues;
  NewValues.push_back(getRoot());
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    SDOperand RetOp = getValue(I.getOperand(i));
    bool isSigned = I.getOperand(i)->getType()->isSigned();

    // If this is an integer return value, we need to promote it ourselves to
    // the full width of a register, since LegalizeOp will use ANY_EXTEND
    // rather than sign/zero extension.
    // FIXME: The C calling convention requires the return type to be promoted
    // to at least 32 bits.  But this is not necessary for non-C calling
    // conventions.
    if (MVT::isInteger(RetOp.getValueType()) &&
        RetOp.getValueType() < MVT::i64) {
      MVT::ValueType TmpVT;
      if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
        TmpVT = TLI.getTypeToTransformTo(MVT::i32);
      else
        TmpVT = MVT::i32;

      if (isSigned)
        RetOp = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, RetOp);
      else
        RetOp = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, RetOp);
    }
    NewValues.push_back(RetOp);
    NewValues.push_back(DAG.getConstant(isSigned, MVT::i32));
  }
  DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other,
                          &NewValues[0], NewValues.size()));
}

void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(Succ0MBB)));

    // Update machine-CFG edges.
    CurMBB->addSuccessor(Succ0MBB);

    return;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
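  // A conditional branch is modeled as a degenerate switch case: a SETEQ
  // CaseBlock with a null CmpRHS, which visitSwitchCase interprets as "use
  // the condition value directly as the branch predicate".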
  Value *CondVal = I.getCondition();
  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

  // Create a CaseBlock record representing this branch.
  SelectionDAGISel::CaseBlock CB(ISD::SETEQ, CondVal, 0,
                                 Succ0MBB, Succ1MBB, CurMBB);
  // Use visitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  visitSwitchCase(CB);
}

/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) {
  SDOperand Cond;
  SDOperand CondLHS = getValue(CB.CmpLHS);

  // If the CaseBlock has both LHS/RHS comparisons, build the setcc now;
  // otherwise, just use the LHS value as a bool comparison value.
  if (CB.CmpRHS)
    Cond = DAG.getSetCC(MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
  else
    Cond = CondLHS;

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  // If the true block is the next block, invert the condition so that we can
  // fall through to the true block instead of the false block.
  if (CB.TrueBB == NextBlock) {
    std::swap(CB.TrueBB, CB.FalseBB);
    SDOperand True = DAG.getConstant(1, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
  }
  SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
                                 DAG.getBasicBlock(CB.TrueBB));
  if (CB.FalseBB == NextBlock)
    DAG.setRoot(BrCond);
  else
    DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond,
                            DAG.getBasicBlock(CB.FalseBB)));
  // Update successor info.
  CurMBB->addSuccessor(CB.TrueBB);
  CurMBB->addSuccessor(CB.FalseBB);
}

void SelectionDAGLowering::visitJumpTable(SelectionDAGISel::JumpTable &JT) {
  // Emit the code for the jump table.
  MVT::ValueType PTy = TLI.getPointerTy();
  assert((PTy == MVT::i32 || PTy == MVT::i64) &&
         "Jump table entries are 32-bit values");
  bool isPIC = TLI.getTargetMachine().getRelocationModel() == Reloc::PIC_;
  // PIC jump table entries are 32-bit values.
  unsigned EntrySize = isPIC ? 4 : MVT::getSizeInBits(PTy)/8;
  SDOperand Copy = DAG.getCopyFromReg(getRoot(), JT.Reg, PTy);
  SDOperand IDX = DAG.getNode(ISD::MUL, PTy, Copy,
                              DAG.getConstant(EntrySize, PTy));
  SDOperand TAB = DAG.getJumpTable(JT.JTI, PTy);
  SDOperand ADD = DAG.getNode(ISD::ADD, PTy, IDX, TAB);
  SDOperand LD = DAG.getLoad(isPIC ? MVT::i32 : PTy, Copy.getValue(1), ADD,
                             NULL, 0);
  if (isPIC) {
    // For PIC, the sequence is:
    //   BRIND(load(Jumptable + index) + RelocBase)
    // RelocBase is the JumpTable on PPC and X86, GOT on Alpha.
    SDOperand Reloc;
    if (TLI.usesGlobalOffsetTable())
      Reloc = DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, PTy);
    else
      Reloc = TAB;
    ADD = (PTy != MVT::i32) ? DAG.getNode(ISD::SIGN_EXTEND, PTy, LD) : LD;
    ADD = DAG.getNode(ISD::ADD, PTy, ADD, Reloc);
    DAG.setRoot(DAG.getNode(ISD::BRIND, MVT::Other, LD.getValue(1), ADD));
  } else {
    DAG.setRoot(DAG.getNode(ISD::BRIND, MVT::Other, LD.getValue(1), LD));
  }
}

void SelectionDAGLowering::visitSwitch(SwitchInst &I) {
  // Figure out which block is immediately after the current one.
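  // Knowing the fall-through block lets the lowering below omit explicit
  // branches to a successor that is laid out immediately after this one.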
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;

  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  MachineBasicBlock *Default = FuncInfo.MBBMap[I.getDefaultDest()];

  // If there is only the default destination, branch to it if it is not the
  // next basic block.  Otherwise, just fall through.
  if (I.getNumOperands() == 2) {
    // Update machine-CFG edges.

    // If this is not a fall-through branch, emit the branch.
    if (Default != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(Default)));

    CurMBB->addSuccessor(Default);
    return;
  }

  // If there are any non-default case statements, create a vector of Cases
  // representing each one, and sort the vector so that we can efficiently
  // create a binary search tree from them.
  std::vector<Case> Cases;

  for (unsigned i = 1; i < I.getNumSuccessors(); ++i) {
    MachineBasicBlock *SMBB = FuncInfo.MBBMap[I.getSuccessor(i)];
    Cases.push_back(Case(I.getSuccessorValue(i), SMBB));
  }

  std::sort(Cases.begin(), Cases.end(), CaseCmp());

  // Get the Value to be switched on and default basic blocks, which will be
  // inserted into CaseBlock records, representing basic blocks in the binary
  // search tree.
  Value *SV = I.getOperand(0);

  // Get the MachineFunction which holds the current MBB.  This is used during
  // emission of jump tables, and when inserting any additional MBBs necessary
  // to represent the switch.
  MachineFunction *CurMF = CurMBB->getParent();
  const BasicBlock *LLVMBB = CurMBB->getBasicBlock();

  // If the switch has few cases (two or fewer), emit a series of specific
  // tests.
  if (Cases.size() < 3) {
    // TODO: If any two of the cases have the same destination, and if one
    // value is the same as the other, but has one bit unset that the other
    // has set, use bit manipulation to do two compares at once.  For example:
    // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"

    // Rearrange the case blocks so that the last one falls through if
    // possible.
    if (NextBlock && Default != NextBlock && Cases.back().second != NextBlock) {
      // The last case block won't fall through into 'NextBlock' if we emit
      // the branches in this order.  See if rearranging a case value would
      // help.
      for (unsigned i = 0, e = Cases.size()-1; i != e; ++i) {
        if (Cases[i].second == NextBlock) {
          std::swap(Cases[i], Cases.back());
          break;
        }
      }
    }

    // Create a CaseBlock record representing a conditional branch to
    // the Case's target mbb if the value being switched on SV is equal
    // to C.
    MachineBasicBlock *CurBlock = CurMBB;
    for (unsigned i = 0, e = Cases.size(); i != e; ++i) {
      MachineBasicBlock *FallThrough;
      if (i != e-1) {
        FallThrough = new MachineBasicBlock(CurMBB->getBasicBlock());
        CurMF->getBasicBlockList().insert(BBI, FallThrough);
      } else {
        // If the last case doesn't match, go to the default block.
        FallThrough = Default;
      }

      SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, Cases[i].first,
                                     Cases[i].second, FallThrough, CurBlock);

      // If emitting the first comparison, just call visitSwitchCase to emit
      // the code into the current block.  Otherwise, push the CaseBlock onto
      // the vector to be later processed by SDISel, and insert the node's
      // MBB before the next MBB.
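      // For example, a switch on SV with cases {1 -> A, 2 -> B} and default D
      // becomes a chain of compare-and-branch blocks:
      //   CurMBB: if (SV == 1) goto A; else goto F1
      //   F1:     if (SV == 2) goto B; else goto D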
      if (CurBlock == CurMBB)
        visitSwitchCase(CB);
      else
        SwitchCases.push_back(CB);

      CurBlock = FallThrough;
    }
    return;
  }

  // If the switch has more than 5 blocks, is at least 31.25% dense, and the
  // target supports indirect branches, then emit a jump table rather than
  // lowering the switch to a binary tree of conditional branches.
  if (TLI.isOperationLegal(ISD::BRIND, TLI.getPointerTy()) &&
      Cases.size() > 5) {
    uint64_t First = cast<ConstantIntegral>(Cases.front().first)->getZExtValue();
    uint64_t Last  = cast<ConstantIntegral>(Cases.back().first)->getZExtValue();
    double Density = (double)Cases.size() / (double)((Last - First) + 1ULL);

    if (Density >= 0.3125) {
      // Create a new basic block to hold the code for loading the address
      // of the jump table, and jumping to it.  Update successor information;
      // we will either branch to the default case for the switch, or the jump
      // table.
      MachineBasicBlock *JumpTableBB = new MachineBasicBlock(LLVMBB);
      CurMF->getBasicBlockList().insert(BBI, JumpTableBB);
      CurMBB->addSuccessor(Default);
      CurMBB->addSuccessor(JumpTableBB);

      // Subtract the lowest switch case value from the value being switched
      // on and conditional branch to default mbb if the result is greater
      // than the difference between smallest and largest cases.
      SDOperand SwitchOp = getValue(SV);
      MVT::ValueType VT = SwitchOp.getValueType();
      SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp,
                                  DAG.getConstant(First, VT));

      // The SDNode we just created, which holds the value being switched on
      // minus the smallest case value, needs to be copied to a virtual
      // register so it can be used as an index into the jump table in a
      // subsequent basic block.  This value may be smaller or larger than the
      // target's pointer type, and therefore require extension or truncation.
      if (VT > TLI.getPointerTy())
        SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB);
      else
        SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB);

      unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
      SDOperand CopyTo = DAG.getCopyToReg(getRoot(), JumpTableReg, SwitchOp);

      // Emit the range check for the jump table, and branch to the default
      // block for the switch statement if the value being switched on exceeds
      // the largest case in the switch.
      SDOperand CMP = DAG.getSetCC(TLI.getSetCCResultTy(), SUB,
                                   DAG.getConstant(Last-First, VT),
                                   ISD::SETUGT);
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP,
                              DAG.getBasicBlock(Default)));

      // Build a vector of destination BBs, corresponding to each target
      // of the jump table.  If the value of the jump table slot corresponds to
      // a case statement, push the case's BB onto the vector, otherwise, push
      // the default BB.
      std::vector<MachineBasicBlock*> DestBBs;
      uint64_t TEI = First;
      for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++TEI)
        if (cast<ConstantIntegral>(ii->first)->getZExtValue() == TEI) {
          DestBBs.push_back(ii->second);
          ++ii;
        } else {
          DestBBs.push_back(Default);
        }

      // Update successor info.
      for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
           E = DestBBs.end(); I != E; ++I)
        JumpTableBB->addSuccessor(*I);

      // Create a jump table index for this jump table, or return an existing
      // one.
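      // Identical destination vectors map to the same index, so equivalent
      // jump tables within this function are shared rather than duplicated.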
      unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);

      // Set the jump table information so that we can codegen it as a second
      // MachineBasicBlock.
      JT.Reg = JumpTableReg;
      JT.JTI = JTI;
      JT.MBB = JumpTableBB;
      JT.Default = Default;
      return;
    }
  }

  // Push the initial CaseRec onto the worklist.
  std::vector<CaseRec> CaseVec;
  CaseVec.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));

  while (!CaseVec.empty()) {
    // Grab a record representing a case range to process off the worklist.
    CaseRec CR = CaseVec.back();
    CaseVec.pop_back();

    // Size is the number of Cases represented by this range.  If Size is 1,
    // then we are processing a leaf of the binary search tree.  Otherwise,
    // we need to pick a pivot, and push left and right ranges onto the
    // worklist.
    unsigned Size = CR.Range.second - CR.Range.first;

    if (Size == 1) {
      // Create a CaseBlock record representing a conditional branch to
      // the Case's target mbb if the value being switched on SV is equal
      // to C.  Otherwise, branch to default.
      Constant *C = CR.Range.first->first;
      MachineBasicBlock *Target = CR.Range.first->second;
      SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, C, Target, Default,
                                     CR.CaseBB);

      // If the MBB representing the leaf node is the current MBB, then just
      // call visitSwitchCase to emit the code into the current block.
      // Otherwise, push the CaseBlock onto the vector to be later processed
      // by SDISel, and insert the node's MBB before the next MBB.
      if (CR.CaseBB == CurMBB)
        visitSwitchCase(CB);
      else
        SwitchCases.push_back(CB);
    } else {
      // Split the case range at the pivot.
      CaseItr Pivot = CR.Range.first + (Size / 2);
      CaseRange LHSR(CR.Range.first, Pivot);
      CaseRange RHSR(Pivot, CR.Range.second);
      Constant *C = Pivot->first;
      MachineBasicBlock *FalseBB = 0, *TrueBB = 0;

      // We know that we branch to the LHS if the Value being switched on is
      // less than the Pivot value, C.  We use this to optimize our binary
      // tree a bit, by recognizing that if SV is greater than or equal to the
      // LHS's Case Value, and that Case Value is exactly one less than the
      // Pivot's Value, then we can branch directly to the LHS's Target,
      // rather than creating a leaf node for it.
      if ((LHSR.second - LHSR.first) == 1 &&
          LHSR.first->first == CR.GE &&
          cast<ConstantIntegral>(C)->getZExtValue() ==
          (cast<ConstantIntegral>(CR.GE)->getZExtValue() + 1ULL)) {
        TrueBB = LHSR.first->second;
      } else {
        TrueBB = new MachineBasicBlock(LLVMBB);
        CurMF->getBasicBlockList().insert(BBI, TrueBB);
        CaseVec.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
      }

      // Similar to the optimization above, if the Value being switched on is
      // known to be less than the Constant CR.LT, and the current Case Value
      // is CR.LT - 1, then we can branch directly to the target block for
      // the current Case Value, rather than emitting a RHS leaf node for it.
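      // e.g. if CR.LT is 10 and the single RHS case value is 9, any value
      // reaching the false edge satisfies 9 <= SV < 10, i.e. SV == 9, so no
      // separate compare-and-branch leaf is needed.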
      if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
          cast<ConstantIntegral>(RHSR.first->first)->getZExtValue() ==
          (cast<ConstantIntegral>(CR.LT)->getZExtValue() - 1ULL)) {
        FalseBB = RHSR.first->second;
      } else {
        FalseBB = new MachineBasicBlock(LLVMBB);
        CurMF->getBasicBlockList().insert(BBI, FalseBB);
        CaseVec.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
      }

      // Create a CaseBlock record representing a conditional branch to
      // the LHS node if the value being switched on SV is less than C.
      // Otherwise, branch to the RHS node.
      ISD::CondCode CC = C->getType()->isSigned() ? ISD::SETLT : ISD::SETULT;
      SelectionDAGISel::CaseBlock CB(CC, SV, C, TrueBB, FalseBB, CR.CaseBB);

      if (CR.CaseBB == CurMBB)
        visitSwitchCase(CB);
      else
        SwitchCases.push_back(CB);
    }
  }
}

void SelectionDAGLowering::visitSub(User &I) {
  // -0.0 - X --> fneg
  if (I.getType()->isFloatingPoint()) {
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
      if (CFP->isExactlyValue(-0.0)) {
        SDOperand Op2 = getValue(I.getOperand(1));
        setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
        return;
      }
  }
  visitBinary(I, ISD::SUB, ISD::FSUB, ISD::VSUB);
}

void SelectionDAGLowering::visitBinary(User &I, unsigned IntOp, unsigned FPOp,
                                       unsigned VecOp) {
  const Type *Ty = I.getType();
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  if (Ty->isIntegral()) {
    setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
  } else if (Ty->isFloatingPoint()) {
    setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
  } else {
    const PackedType *PTy = cast<PackedType>(Ty);
    SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
    SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
    setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
  }
}

void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);

  setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
}

void SelectionDAGLowering::visitSetCC(User &I, ISD::CondCode SignedOpcode,
                                      ISD::CondCode UnsignedOpcode,
                                      ISD::CondCode FPOpcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode = SignedOpcode;
  if (!FiniteOnlyFPMath() && I.getOperand(0)->getType()->isFloatingPoint())
    Opcode = FPOpcode;
  else if (I.getOperand(0)->getType()->isUnsigned())
    Opcode = UnsignedOpcode;
  setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
}

void SelectionDAGLowering::visitSelect(User &I) {
  SDOperand Cond     = getValue(I.getOperand(0));
  SDOperand TrueVal  = getValue(I.getOperand(1));
  SDOperand FalseVal = getValue(I.getOperand(2));
  if (!isa<PackedType>(I.getType())) {
    setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
                             TrueVal, FalseVal));
  } else {
    setValue(&I, DAG.getNode(ISD::VSELECT, MVT::Vector, Cond, TrueVal,
                             FalseVal,
                             *(TrueVal.Val->op_end()-2),
                             *(TrueVal.Val->op_end()-1)));
  }
}

void SelectionDAGLowering::visitCast(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType SrcVT = N.getValueType();
  MVT::ValueType DestVT = TLI.getValueType(I.getType());

  if (DestVT == MVT::Vector) {
    // This is a cast to a vector from something else.  This is always a bit
    // convert.  Get information about the destination vector.
    const PackedType *DestTy = cast<PackedType>(I.getType());
    MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
    setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N,
                             DAG.getConstant(DestTy->getNumElements(), MVT::i32),
                             DAG.getValueType(EltVT)));
  } else if (SrcVT == DestVT) {
    setValue(&I, N);  // noop cast.
  } else if (DestVT == MVT::i1) {
    // Cast to bool is a comparison against zero, not truncation to zero.
    SDOperand Zero = isInteger(SrcVT) ? DAG.getConstant(0, N.getValueType()) :
                                        DAG.getConstantFP(0.0, N.getValueType());
    setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE));
  } else if (isInteger(SrcVT)) {
    if (isInteger(DestVT)) {        // Int -> Int cast
      if (DestVT < SrcVT)           // Truncating cast?
        setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
      else if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
    } else if (isFloatingPoint(DestVT)) {  // Int -> FP cast
      if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
    } else {
      assert(0 && "Unknown cast!");
    }
  } else if (isFloatingPoint(SrcVT)) {
    if (isFloatingPoint(DestVT)) {  // FP -> FP cast
      if (DestVT < SrcVT)           // Rounding cast?
        setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
    } else if (isInteger(DestVT)) { // FP -> Int cast.
      if (I.getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
    } else {
      assert(0 && "Unknown cast!");
    }
  } else {
    assert(SrcVT == MVT::Vector && "Unknown cast!");
    assert(DestVT != MVT::Vector && "Casts to vector already handled!");
    // This is a cast from a vector to something else.  This is always a bit
    // convert.
    setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N));
  }
}

void SelectionDAGLowering::visitInsertElement(User &I) {
  SDOperand InVec = getValue(I.getOperand(0));
  SDOperand InVal = getValue(I.getOperand(1));
  SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
                                getValue(I.getOperand(2)));

  SDOperand Num = *(InVec.Val->op_end()-2);
  SDOperand Typ = *(InVec.Val->op_end()-1);
  setValue(&I, DAG.getNode(ISD::VINSERT_VECTOR_ELT, MVT::Vector,
                           InVec, InVal, InIdx, Num, Typ));
}

void SelectionDAGLowering::visitExtractElement(User &I) {
  SDOperand InVec = getValue(I.getOperand(0));
  SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
                                getValue(I.getOperand(1)));
  SDOperand Typ = *(InVec.Val->op_end()-1);
  setValue(&I, DAG.getNode(ISD::VEXTRACT_VECTOR_ELT,
                           TLI.getValueType(I.getType()), InVec, InIdx));
}

void SelectionDAGLowering::visitShuffleVector(User &I) {
  SDOperand V1   = getValue(I.getOperand(0));
  SDOperand V2   = getValue(I.getOperand(1));
  SDOperand Mask = getValue(I.getOperand(2));

  SDOperand Num = *(V1.Val->op_end()-2);
  SDOperand Typ = *(V2.Val->op_end()-1);
  setValue(&I, DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector,
                           V1, V2, Mask, Num, Typ));
}


void SelectionDAGLowering::visitGetElementPtr(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  const Type *Ty = I.getOperand(0)->getType();

  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offset = TD->getStructLayout(StTy)->MemberOffsets[Field];
        N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                        getIntPtrConstant(Offset));
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getZExtValue() == 0) continue;
        uint64_t Offs;
        if (CI->getType()->isSigned())
          Offs = (int64_t)
            TD->getTypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        else
          Offs =
            TD->getTypeSize(Ty)*cast<ConstantInt>(CI)->getZExtValue();
        N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                        getIntPtrConstant(Offs));
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD->getTypeSize(Ty);
      SDOperand IdxN = getValue(Idx);

      // If the index is smaller or larger than intptr_t, truncate or extend
      // it.
      if (IdxN.getValueType() < N.getValueType()) {
        if (Idx->getType()->isSigned())
          IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
        else
          IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN);
      } else if (IdxN.getValueType() > N.getValueType())
        IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);

      // If this is a multiply by a power of two, turn it into a shl
      // immediately.  This is a very common case.
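      // e.g. indexing an array of i32 (ElementSize == 4, Log2_64(4) == 2)
      // yields N + (IdxN << 2) instead of N + IdxN * 4.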
      if (isPowerOf2_64(ElementSize)) {
        unsigned Amt = Log2_64(ElementSize);
        IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
                           DAG.getConstant(Amt, TLI.getShiftAmountTy()));
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
        continue;
      }

      SDOperand Scale = getIntPtrConstant(ElementSize);
      IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
      N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
    }
  }
  setValue(&I, N);
}

void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
  // If this is a fixed sized alloca in the entry block of the function,
  // allocate it statically on the stack.
  if (FuncInfo.StaticAllocaMap.count(&I))
    return;   // getValue will auto-populate this.

  const Type *Ty = I.getAllocatedType();
  uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
  unsigned Align = std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
                            I.getAlignment());

  SDOperand AllocSize = getValue(I.getArraySize());
  MVT::ValueType IntPtr = TLI.getPointerTy();
  if (IntPtr < AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
  else if (IntPtr > AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);

  AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
                          getIntPtrConstant(TySize));

  // Handle alignment.  If the requested alignment is less than or equal to
  // the stack alignment, ignore it and round the size of the allocation up to
  // the stack alignment size.  If the size is greater than the stack
  // alignment, we note this in the DYNAMIC_STACKALLOC node.
  unsigned StackAlign =
    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
  if (Align <= StackAlign) {
    Align = 0;
    // Add SA-1 to the size.
    AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(StackAlign-1));
    // Mask out the low bits for alignment purposes.
    AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(~(uint64_t)(StackAlign-1)));
  }

  SDOperand Ops[] = { getRoot(), AllocSize, getIntPtrConstant(Align) };
  const MVT::ValueType *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(),
                                                    MVT::Other);
  SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, 2, Ops, 3);
  DAG.setRoot(setValue(&I, DSA).getValue(1));

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
}

void SelectionDAGLowering::visitLoad(LoadInst &I) {
  SDOperand Ptr = getValue(I.getOperand(0));

  SDOperand Root;
  if (I.isVolatile())
    Root = getRoot();
  else {
    // Do not serialize non-volatile loads against each other.
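    // Using DAG.getRoot() rather than getRoot() leaves PendingLoads alone,
    // so independent loads hang off the same chain and remain reorderable;
    // getLoadFrom() appends this load's output chain to PendingLoads instead.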
    Root = DAG.getRoot();
  }

  setValue(&I, getLoadFrom(I.getType(), Ptr, I.getOperand(0),
                           Root, I.isVolatile()));
}

SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr,
                                            const Value *SV, SDOperand Root,
                                            bool isVolatile) {
  SDOperand L;
  if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
    MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
    L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr,
                       DAG.getSrcValue(SV));
  } else {
    L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SV, isVolatile);
  }

  if (isVolatile)
    DAG.setRoot(L.getValue(1));
  else
    PendingLoads.push_back(L.getValue(1));

  return L;
}


void SelectionDAGLowering::visitStore(StoreInst &I) {
  Value *SrcV = I.getOperand(0);
  SDOperand Src = getValue(SrcV);
  SDOperand Ptr = getValue(I.getOperand(1));
  DAG.setRoot(DAG.getStore(getRoot(), Src, Ptr, I.getOperand(1),
                           I.isVolatile()));
}

/// IntrinsicCannotAccessMemory - Return true if the specified intrinsic cannot
/// access memory and has no other side effects at all.
static bool IntrinsicCannotAccessMemory(unsigned IntrinsicID) {
#define GET_NO_MEMORY_INTRINSICS
#include "llvm/Intrinsics.gen"
#undef GET_NO_MEMORY_INTRINSICS
  return false;
}

// IntrinsicOnlyReadsMemory - Return true if the specified intrinsic doesn't
// have any side-effects or if it only reads memory.
static bool IntrinsicOnlyReadsMemory(unsigned IntrinsicID) {
#define GET_SIDE_EFFECT_INFO
#include "llvm/Intrinsics.gen"
#undef GET_SIDE_EFFECT_INFO
  return false;
}

/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
                                                unsigned Intrinsic) {
  bool HasChain = !IntrinsicCannotAccessMemory(Intrinsic);
  bool OnlyLoad = HasChain && IntrinsicOnlyReadsMemory(Intrinsic);

  // Build the operand list.
  SmallVector<SDOperand, 8> Ops;
  if (HasChain) {   // If this intrinsic has side-effects, chainify it.
    if (OnlyLoad) {
      // We don't need to serialize loads against other loads.
      Ops.push_back(DAG.getRoot());
    } else {
      Ops.push_back(getRoot());
    }
  }

  // Add the intrinsic ID as an integer operand.
  Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));

  // Add all operands of the call to the operand list.
  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
    SDOperand Op = getValue(I.getOperand(i));

    // If this is a vector type, force it to the right packed type.
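    // A generic MVT::Vector operand must be narrowed to the concrete legal
    // packed type the target expects (e.g. v4f32) before it can appear in
    // the intrinsic's operand list.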
    if (Op.getValueType() == MVT::Vector) {
      const PackedType *OpTy = cast<PackedType>(I.getOperand(i)->getType());
      MVT::ValueType EltVT = TLI.getValueType(OpTy->getElementType());

      MVT::ValueType VVT = MVT::getVectorType(EltVT, OpTy->getNumElements());
      assert(VVT != MVT::Other && "Intrinsic uses a non-legal type?");
      Op = DAG.getNode(ISD::VBIT_CONVERT, VVT, Op);
    }

    assert(TLI.isTypeLegal(Op.getValueType()) &&
           "Intrinsic uses a non-legal type?");
    Ops.push_back(Op);
  }

  std::vector<MVT::ValueType> VTs;
  if (I.getType() != Type::VoidTy) {
    MVT::ValueType VT = TLI.getValueType(I.getType());
    if (VT == MVT::Vector) {
      const PackedType *DestTy = cast<PackedType>(I.getType());
      MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());

      VT = MVT::getVectorType(EltVT, DestTy->getNumElements());
      assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
    }

    assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
    VTs.push_back(VT);
  }
  if (HasChain)
    VTs.push_back(MVT::Other);

  const MVT::ValueType *VTList = DAG.getNodeValueTypes(VTs);

  // Create the node.
  SDOperand Result;
  if (!HasChain)
    Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTList, VTs.size(),
                         &Ops[0], Ops.size());
  else if (I.getType() != Type::VoidTy)
    Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTList, VTs.size(),
                         &Ops[0], Ops.size());
  else
    Result = DAG.getNode(ISD::INTRINSIC_VOID, VTList, VTs.size(),
                         &Ops[0], Ops.size());

  if (HasChain) {
    SDOperand Chain = Result.getValue(Result.Val->getNumValues()-1);
    if (OnlyLoad)
      PendingLoads.push_back(Chain);
    else
      DAG.setRoot(Chain);
  }
  if (I.getType() != Type::VoidTy) {
    if (const PackedType *PTy = dyn_cast<PackedType>(I.getType())) {
      MVT::ValueType EVT = TLI.getValueType(PTy->getElementType());
      Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result,
                           DAG.getConstant(PTy->getNumElements(), MVT::i32),
                           DAG.getValueType(EVT));
    }
    setValue(&I, Result);
  }
}

/// visitIntrinsicCall - Lower the call to the specified intrinsic function.
/// If we want to emit this as a call to a named external function, return the
/// name; otherwise, lower it and return null.
const char *
SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
  switch (Intrinsic) {
  default:
    // By default, turn this into a target intrinsic node.
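    // (Any intrinsic without explicit lowering below takes this path; how it
    // is chained depends on whether it can read or write memory.)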
    visitTargetIntrinsic(I, Intrinsic);
    return 0;
  case Intrinsic::vastart:  visitVAStart(I); return 0;
  case Intrinsic::vaend:    visitVAEnd(I); return 0;
  case Intrinsic::vacopy:   visitVACopy(I); return 0;
  case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0;
  case Intrinsic::frameaddress:  visitFrameReturnAddress(I, true); return 0;
  case Intrinsic::setjmp:
    return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
  case Intrinsic::longjmp:
    return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
  case Intrinsic::memcpy_i32:
  case Intrinsic::memcpy_i64:
    visitMemIntrinsic(I, ISD::MEMCPY);
    return 0;
  case Intrinsic::memset_i32:
  case Intrinsic::memset_i64:
    visitMemIntrinsic(I, ISD::MEMSET);
    return 0;
  case Intrinsic::memmove_i32:
  case Intrinsic::memmove_i64:
    visitMemIntrinsic(I, ISD::MEMMOVE);
    return 0;

  case Intrinsic::dbg_stoppoint: {
    MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
    DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
    if (DebugInfo && SPI.getContext() && DebugInfo->Verify(SPI.getContext())) {
      SDOperand Ops[5];

      Ops[0] = getRoot();
      Ops[1] = getValue(SPI.getLineValue());
      Ops[2] = getValue(SPI.getColumnValue());

      DebugInfoDesc *DD = DebugInfo->getDescFor(SPI.getContext());
      assert(DD && "Not a debug information descriptor");
      CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);

      Ops[3] = DAG.getString(CompileUnit->getFileName());
      Ops[4] = DAG.getString(CompileUnit->getDirectory());

      DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops, 5));
    }

    return 0;
  }
  case Intrinsic::dbg_region_start: {
    MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
    DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
    if (DebugInfo && RSI.getContext() && DebugInfo->Verify(RSI.getContext())) {
      unsigned LabelID = DebugInfo->RecordRegionStart(RSI.getContext());
      DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, getRoot(),
                              DAG.getConstant(LabelID, MVT::i32)));
    }

    return 0;
  }
  case Intrinsic::dbg_region_end: {
    MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
    DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
    if (DebugInfo && REI.getContext() && DebugInfo->Verify(REI.getContext())) {
      unsigned LabelID = DebugInfo->RecordRegionEnd(REI.getContext());
      DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other,
                              getRoot(), DAG.getConstant(LabelID, MVT::i32)));
    }

    return 0;
  }
  case Intrinsic::dbg_func_start: {
    MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
    DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
    if (DebugInfo && FSI.getSubprogram() &&
        DebugInfo->Verify(FSI.getSubprogram())) {
      unsigned LabelID = DebugInfo->RecordRegionStart(FSI.getSubprogram());
      DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other,
                              getRoot(), DAG.getConstant(LabelID, MVT::i32)));
    }

    return 0;
  }
  case Intrinsic::dbg_declare: {
    MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
    DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
    if (DebugInfo && DI.getVariable() && DebugInfo->Verify(DI.getVariable())) {
      SDOperand AddressOp = getValue(DI.getAddress());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp))
        DebugInfo->RecordVariable(DI.getVariable(),
                                  FI->getIndex());
    }

    return 0;
  }

  case Intrinsic::isunordered_f32:
  case Intrinsic::isunordered_f64:
    setValue(&I, DAG.getSetCC(MVT::i1, getValue(I.getOperand(1)),
                              getValue(I.getOperand(2)), ISD::SETUO));
    return 0;

  case Intrinsic::sqrt_f32:
  case Intrinsic::sqrt_f64:
    setValue(&I, DAG.getNode(ISD::FSQRT,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::powi_f32:
  case Intrinsic::powi_f64:
    setValue(&I, DAG.getNode(ISD::FPOWI,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1)),
                             getValue(I.getOperand(2))));
    return 0;
  case Intrinsic::pcmarker: {
    SDOperand Tmp = getValue(I.getOperand(1));
    DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp));
    return 0;
  }
  case Intrinsic::readcyclecounter: {
    SDOperand Op = getRoot();
    SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER,
                                DAG.getNodeValueTypes(MVT::i64, MVT::Other), 2,
                                &Op, 1);
    setValue(&I, Tmp);
    DAG.setRoot(Tmp.getValue(1));
    return 0;
  }
  case Intrinsic::bswap_i16:
  case Intrinsic::bswap_i32:
  case Intrinsic::bswap_i64:
    setValue(&I, DAG.getNode(ISD::BSWAP,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::cttz_i8:
  case Intrinsic::cttz_i16:
  case Intrinsic::cttz_i32:
  case Intrinsic::cttz_i64:
    setValue(&I, DAG.getNode(ISD::CTTZ,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::ctlz_i8:
  case Intrinsic::ctlz_i16:
  case Intrinsic::ctlz_i32:
  case Intrinsic::ctlz_i64:
    setValue(&I, DAG.getNode(ISD::CTLZ,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::ctpop_i8:
  case Intrinsic::ctpop_i16:
  case Intrinsic::ctpop_i32:
  case Intrinsic::ctpop_i64:
    setValue(&I, DAG.getNode(ISD::CTPOP,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::stacksave: {
    SDOperand Op = getRoot();
    SDOperand Tmp = DAG.getNode(ISD::STACKSAVE,
                                DAG.getNodeValueTypes(TLI.getPointerTy(),
                                                      MVT::Other), 2, &Op, 1);
    setValue(&I, Tmp);
    DAG.setRoot(Tmp.getValue(1));
    return 0;
  }
  case Intrinsic::stackrestore: {
    SDOperand Tmp = getValue(I.getOperand(1));
    DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp));
    return 0;
  }
  case Intrinsic::prefetch:
    // FIXME: Currently discarding prefetches.
    return 0;
  }
}


void SelectionDAGLowering::visitCall(CallInst &I) {
  const char *RenameFn = 0;
  if (Function *F = I.getCalledFunction()) {
    if (F->isExternal())
      if (unsigned IID = F->getIntrinsicID()) {
        RenameFn = visitIntrinsicCall(I, IID);
        if (!RenameFn)
          return;
      } else {  // Not an LLVM intrinsic.
        const std::string &Name = F->getName();
        if (Name[0] == 'c' && (Name == "copysign" || Name == "copysignf")) {
          if (I.getNumOperands() == 3 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType() &&
              I.getType() == I.getOperand(2)->getType()) {
            SDOperand LHS = getValue(I.getOperand(1));
            SDOperand RHS = getValue(I.getOperand(2));
            setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(),
                                     LHS, RHS));
            return;
          }
        } else if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType()) {
            SDOperand Tmp = getValue(I.getOperand(1));
            setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
            return;
          }
        } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) {
          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType()) {
            SDOperand Tmp = getValue(I.getOperand(1));
            setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
            return;
          }
        } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) {
          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType()) {
            SDOperand Tmp = getValue(I.getOperand(1));
            setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
            return;
          }
        }
      }
  } else if (isa<InlineAsm>(I.getOperand(0))) {
    visitInlineAsm(I);
    return;
  }

  SDOperand Callee;
  if (!RenameFn)
    Callee = getValue(I.getOperand(0));
  else
    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.reserve(I.getNumOperands());
  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
    Value *Arg = I.getOperand(i);
    SDOperand ArgNode = getValue(Arg);
    Args.push_back(std::make_pair(ArgNode, Arg->getType()));
  }

  const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(),
                    I.getCallingConv(), I.isTailCall(), Callee, Args, DAG);
  if (I.getType() != Type::VoidTy)
    setValue(&I, Result.first);
  DAG.setRoot(Result.second);
}

SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                        SDOperand &Chain,
                                        SDOperand &Flag) const {
  SDOperand Val = DAG.getCopyFromReg(Chain, Regs[0], RegVT, Flag);
  Chain = Val.getValue(1);
  Flag  = Val.getValue(2);

  // If the result was expanded, copy from the top part.
  if (Regs.size() > 1) {
    assert(Regs.size() == 2 &&
           "Cannot expand to more than 2 elts yet!");
    SDOperand Hi = DAG.getCopyFromReg(Chain, Regs[1], RegVT, Flag);
    Chain = Hi.getValue(1);
    Flag  = Hi.getValue(2);
    if (DAG.getTargetLoweringInfo().isLittleEndian())
      return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Val, Hi);
    else
      return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Hi, Val);
  }

  // Otherwise, if the return value was promoted or extended, truncate it to
  // the appropriate type.
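  // For example, an i8 value that was promoted into an i32 register
  // (RegVT == i32, ValueVT == i8) comes back as an i32 and is truncated
  // to i8 below.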
  if (RegVT == ValueVT)
    return Val;

  if (MVT::isInteger(RegVT)) {
    if (ValueVT < RegVT)
      return DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
    else
      return DAG.getNode(ISD::ANY_EXTEND, ValueVT, Val);
  } else {
    return DAG.getNode(ISD::FP_ROUND, ValueVT, Val);
  }
}

/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
/// specified value into the registers specified by this object.  This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
                                 SDOperand &Chain, SDOperand &Flag,
                                 MVT::ValueType PtrVT) const {
  if (Regs.size() == 1) {
    // If there is a single register and the types differ, this must be
    // a promotion.
    if (RegVT != ValueVT) {
      if (MVT::isInteger(RegVT)) {
        if (RegVT < ValueVT)
          Val = DAG.getNode(ISD::TRUNCATE, RegVT, Val);
        else
          Val = DAG.getNode(ISD::ANY_EXTEND, RegVT, Val);
      } else
        Val = DAG.getNode(ISD::FP_EXTEND, RegVT, Val);
    }
    Chain = DAG.getCopyToReg(Chain, Regs[0], Val, Flag);
    Flag = Chain.getValue(1);
  } else {
    std::vector<unsigned> R(Regs);
    if (!DAG.getTargetLoweringInfo().isLittleEndian())
      std::reverse(R.begin(), R.end());

    for (unsigned i = 0, e = R.size(); i != e; ++i) {
      SDOperand Part = DAG.getNode(ISD::EXTRACT_ELEMENT, RegVT, Val,
                                   DAG.getConstant(i, PtrVT));
      Chain = DAG.getCopyToReg(Chain, R[i], Part, Flag);
      Flag = Chain.getValue(1);
    }
  }
}

/// AddInlineAsmOperands - Add this value to the specified inlineasm node
/// operand list.  This adds the code marker and includes the number of
/// values added into it.
void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
                                        std::vector<SDOperand> &Ops) const {
  Ops.push_back(DAG.getConstant(Code | (Regs.size() << 3), MVT::i32));
  for (unsigned i = 0, e = Regs.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(Regs[i], RegVT));
}

/// isAllocatableRegister - If the specified register is safe to allocate,
/// i.e. it isn't a stack pointer or some other special register, return the
/// register class for the register.  Otherwise, return null.
static const TargetRegisterClass *
isAllocatableRegister(unsigned Reg, MachineFunction &MF,
                      const TargetLowering &TLI, const MRegisterInfo *MRI) {
  MVT::ValueType FoundVT = MVT::Other;
  const TargetRegisterClass *FoundRC = 0;
  for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(),
       E = MRI->regclass_end(); RCI != E; ++RCI) {
    MVT::ValueType ThisVT = MVT::Other;

    const TargetRegisterClass *RC = *RCI;
    // If none of the value types for this register class are valid, we
    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
         I != E; ++I) {
      if (TLI.isTypeLegal(*I)) {
        // If we have already found this register in a different register
        // class, choose the one with the largest VT specified.  For example,
        // on PowerPC, we favor f64 register classes over f32.
        if (FoundVT == MVT::Other ||
            MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) {
          ThisVT = *I;
          break;
        }
      }
    }

    if (ThisVT == MVT::Other) continue;

    // NOTE: This isn't ideal.
    // In particular, this might allocate the frame pointer in functions that
    // need it (due to them not being taken out of allocation, because a
    // variable sized allocation hasn't been seen yet).  This is a slight code
    // pessimization, but should still work.
    for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
         E = RC->allocation_order_end(MF); I != E; ++I)
      if (*I == Reg) {
        // We found a matching register class.  Keep looking at the other
        // classes that contain this physreg, in case one of them has larger
        // registers.
        FoundRC = RC;
        FoundVT = ThisVT;
        break;
      }
  }
  return FoundRC;
}

RegsForValue SelectionDAGLowering::
GetRegistersForValue(const std::string &ConstrCode,
                     MVT::ValueType VT, bool isOutReg, bool isInReg,
                     std::set<unsigned> &OutputRegs,
                     std::set<unsigned> &InputRegs) {
  std::pair<unsigned, const TargetRegisterClass*> PhysReg =
    TLI.getRegForInlineAsmConstraint(ConstrCode, VT);
  std::vector<unsigned> Regs;

  unsigned NumRegs = VT != MVT::Other ? TLI.getNumElements(VT) : 1;
  MVT::ValueType RegVT;
  MVT::ValueType ValueVT = VT;

  if (PhysReg.first) {
    if (VT == MVT::Other)
      ValueVT = *PhysReg.second->vt_begin();

    // Get the actual register value type.  This is important, because the
    // user may have asked for (e.g.) the AX register in i32 type.  We need
    // to remember that AX is actually i16 to get the right extension.
    RegVT = *PhysReg.second->vt_begin();

    // This is an explicit reference to a physical register.
    Regs.push_back(PhysReg.first);

    // If this is an expanded reference, add the rest of the regs to Regs.
    if (NumRegs != 1) {
      TargetRegisterClass::iterator I = PhysReg.second->begin();
      TargetRegisterClass::iterator E = PhysReg.second->end();
      for (; *I != PhysReg.first; ++I)
        assert(I != E && "Didn't find reg!");

      // Already added the first reg.
      --NumRegs; ++I;
      for (; NumRegs; --NumRegs, ++I) {
        assert(I != E && "Ran out of registers to allocate!");
        Regs.push_back(*I);
      }
    }
    return RegsForValue(Regs, RegVT, ValueVT);
  }

  // This is a reference to a register class.  Allocate NumRegs consecutive,
  // available, registers from the class.
  std::vector<unsigned> RegClassRegs =
    TLI.getRegClassForInlineAsmConstraint(ConstrCode, VT);

  const MRegisterInfo *MRI = DAG.getTarget().getRegisterInfo();
  MachineFunction &MF = *CurMBB->getParent();
  unsigned NumAllocated = 0;
  for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
    unsigned Reg = RegClassRegs[i];
    // See if this register is available.
    if ((isOutReg && OutputRegs.count(Reg)) ||    // Already used.
        (isInReg  && InputRegs.count(Reg))) {     // Already used.
      // Make sure we find consecutive registers.
      NumAllocated = 0;
      continue;
    }

    // Check to see if this register is allocatable (i.e. don't give out the
    // stack pointer).
    const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, MRI);
    if (!RC) {
      // Make sure we find consecutive registers.
      NumAllocated = 0;
      continue;
    }

    // Okay, this register is good, we can use it.
    ++NumAllocated;

    // If we allocated enough consecutive registers, we're done.
    if (NumAllocated == NumRegs) {
      unsigned RegStart = (i-NumAllocated)+1;
      unsigned RegEnd   = i+1;
      // Mark all of the allocated registers used.
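      // At this point i indexes the last register accepted, so the run of
      // consecutive registers is RegClassRegs[i-NumAllocated+1 .. i].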
      for (unsigned i = RegStart; i != RegEnd; ++i) {
        unsigned Reg = RegClassRegs[i];
        Regs.push_back(Reg);
        if (isOutReg) OutputRegs.insert(Reg);    // Mark reg used.
        if (isInReg)  InputRegs.insert(Reg);     // Mark reg used.
      }

      return RegsForValue(Regs, *RC->vt_begin(), VT);
    }
  }

  // Otherwise, we couldn't allocate enough registers for this.
  return RegsForValue();
}


/// visitInlineAsm - Handle a call to an InlineAsm object.
///
void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
  InlineAsm *IA = cast<InlineAsm>(I.getOperand(0));

  SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
                                                 MVT::Other);

  // Note, we treat inline asms both with and without side-effects as the
  // same.  If an inline asm doesn't have side effects and doesn't access
  // memory, we could choose not to chain it.
  bool hasSideEffects = IA->hasSideEffects();

  std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
  std::vector<MVT::ValueType> ConstraintVTs;

  /// AsmNodeOperands - A list of pairs.  The first element is a register, the
  /// second is a bitfield where bit #0 is set if it is a use and bit #1 is set
  /// if it is a def of that register.
  std::vector<SDOperand> AsmNodeOperands;
  AsmNodeOperands.push_back(SDOperand());  // reserve space for input chain
  AsmNodeOperands.push_back(AsmStr);

  SDOperand Chain = getRoot();
  SDOperand Flag;

  // We fully assign registers here at isel time.  This is not optimal, but
  // should work.  For register classes that correspond to LLVM classes, we
  // could let the LLVM RA do its thing, but we currently don't.  Do a prepass
  // over the constraints, collecting fixed registers that we know we can't
  // use.
  std::set<unsigned> OutputRegs, InputRegs;
  unsigned OpNum = 1;
  for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
    assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
    std::string &ConstraintCode = Constraints[i].Codes[0];

    MVT::ValueType OpVT;

    // Compute the value type for each operand and add it to ConstraintVTs.
    switch (Constraints[i].Type) {
    case InlineAsm::isOutput:
      if (!Constraints[i].isIndirectOutput) {
        assert(I.getType() != Type::VoidTy && "Bad inline asm!");
        OpVT = TLI.getValueType(I.getType());
      } else {
        const Type *OpTy = I.getOperand(OpNum)->getType();
        OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType());
        OpNum++;  // Consumes a call operand.
      }
      break;
    case InlineAsm::isInput:
      OpVT = TLI.getValueType(I.getOperand(OpNum)->getType());
      OpNum++;  // Consumes a call operand.
      break;
    case InlineAsm::isClobber:
      OpVT = MVT::Other;
      break;
    }

    ConstraintVTs.push_back(OpVT);

    if (TLI.getRegForInlineAsmConstraint(ConstraintCode, OpVT).first == 0)
      continue;  // Not assigned a fixed reg.

    // Build a list of regs that this operand uses.  This always has a single
    // element for promoted/expanded operands.
    RegsForValue Regs = GetRegistersForValue(ConstraintCode, OpVT,
                                             false, false,
                                             OutputRegs, InputRegs);

    switch (Constraints[i].Type) {
    case InlineAsm::isOutput:
      // We can't assign any other output to this register.
      OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
      // If this is an early-clobber output, it cannot be assigned to the same
      // value as the input reg.
      if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
        InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
      break;
    case InlineAsm::isInput:
      // We can't assign any other input to this register.
      InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
      break;
    case InlineAsm::isClobber:
      // Clobbered regs cannot be used as inputs or outputs.
      InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
      OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
      break;
    }
  }

  // Loop over all of the inputs, copying the operand values into the
  // appropriate registers and processing the output regs.
  RegsForValue RetValRegs;
  std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
  OpNum = 1;

  for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
    assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
    std::string &ConstraintCode = Constraints[i].Codes[0];

    switch (Constraints[i].Type) {
    case InlineAsm::isOutput: {
      TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
      if (ConstraintCode.size() == 1)   // not a physreg name.
        CTy = TLI.getConstraintType(ConstraintCode[0]);

      if (CTy == TargetLowering::C_Memory) {
        // Memory output.
        SDOperand InOperandVal = getValue(I.getOperand(OpNum));

        // Check that the operand (the address to store to) isn't a float.
        if (!MVT::isInteger(InOperandVal.getValueType()))
          assert(0 && "MATCH FAIL!");

        if (!Constraints[i].isIndirectOutput)
          assert(0 && "MATCH FAIL!");

        OpNum++;  // Consumes a call operand.

        // Extend/truncate to the right pointer type if needed.
        MVT::ValueType PtrType = TLI.getPointerTy();
        if (InOperandVal.getValueType() < PtrType)
          InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
        else if (InOperandVal.getValueType() > PtrType)
          InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);

        // Add information to the INLINEASM node to know about this output.
        unsigned ResOpType = 4/*MEM*/ | (1 << 3);
        AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
        AsmNodeOperands.push_back(InOperandVal);
        break;
      }

      // Otherwise, this is a register output.
      assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");

      // If this is an early-clobber output, or if there is an input
      // constraint that matches this, we need to reserve the input register
      // so no other inputs allocate to it.
      bool UsesInputRegister = false;
      if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
        UsesInputRegister = true;

      // Copy the output from the appropriate register.  Find a register that
      // we can use.
      RegsForValue Regs =
        GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
                             true, UsesInputRegister,
                             OutputRegs, InputRegs);
      assert(!Regs.Regs.empty() && "Couldn't allocate output reg!");

      if (!Constraints[i].isIndirectOutput) {
        assert(RetValRegs.Regs.empty() &&
               "Cannot have multiple output constraints yet!");
        assert(I.getType() != Type::VoidTy && "Bad inline asm!");
        RetValRegs = Regs;
      } else {
        IndirectStoresToEmit.push_back(std::make_pair(Regs,
                                                      I.getOperand(OpNum)));
        OpNum++;  // Consumes a call operand.
      }

      // Add information to the INLINEASM node to know that this register is
      // set.
      Regs.AddInlineAsmOperands(2 /*REGDEF*/, DAG, AsmNodeOperands);
      break;
    }
    case InlineAsm::isInput: {
      SDOperand InOperandVal = getValue(I.getOperand(OpNum));
      OpNum++;  // Consumes a call operand.

      if (isdigit(ConstraintCode[0])) {    // Matching constraint?
        // If this is required to match an output register we have already
        // set, just use its register.
        unsigned OperandNo = atoi(ConstraintCode.c_str());

        // Scan until we find the definition we already emitted of this
        // operand.  When we find it, create a RegsForValue operand.
        unsigned CurOp = 2;  // The first operand.
        for (; OperandNo; --OperandNo) {
          // Advance to the next operand.
          unsigned NumOps =
            cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
          assert(((NumOps & 7) == 2 /*REGDEF*/ ||
                  (NumOps & 7) == 4 /*MEM*/) &&
                 "Skipped past definitions?");
          CurOp += (NumOps>>3)+1;
        }

        unsigned NumOps =
          cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
        assert((NumOps & 7) == 2 /*REGDEF*/ &&
               "Skipped past definitions?");

        // Add NumOps>>3 registers to MatchedRegs.
        RegsForValue MatchedRegs;
        MatchedRegs.ValueVT = InOperandVal.getValueType();
        MatchedRegs.RegVT   = AsmNodeOperands[CurOp+1].getValueType();
        for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
          unsigned Reg =
            cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
          MatchedRegs.Regs.push_back(Reg);
        }

        // Use the produced MatchedRegs object to copy the input value into
        // the matched registers.
        MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag,
                                  TLI.getPointerTy());
        MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
        break;
      }

      TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
      if (ConstraintCode.size() == 1)   // not a physreg name.
        CTy = TLI.getConstraintType(ConstraintCode[0]);

      if (CTy == TargetLowering::C_Other) {
        if (!TLI.isOperandValidForConstraint(InOperandVal, ConstraintCode[0]))
          assert(0 && "MATCH FAIL!");

        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType = 3 /*IMM*/ | (1 << 3);
        AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
        AsmNodeOperands.push_back(InOperandVal);
        break;
      } else if (CTy == TargetLowering::C_Memory) {
        // Memory input.

        // Check that the operand isn't a float.
        if (!MVT::isInteger(InOperandVal.getValueType()))
          assert(0 && "MATCH FAIL!");

        // Extend/truncate to the right pointer type if needed.
        MVT::ValueType PtrType = TLI.getPointerTy();
        if (InOperandVal.getValueType() < PtrType)
          InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
        else if (InOperandVal.getValueType() > PtrType)
          InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);

        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType = 4/*MEM*/ | (1 << 3);
        AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
        AsmNodeOperands.push_back(InOperandVal);
        break;
      }

      assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");

      // Copy the input into the appropriate registers.
      RegsForValue InRegs =
        GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
                             false, true, OutputRegs, InputRegs);
      // FIXME: should be match fail.
      assert(!InRegs.Regs.empty() && "Couldn't allocate input reg!");

      InRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag, TLI.getPointerTy());

      InRegs.AddInlineAsmOperands(1/*REGUSE*/, DAG, AsmNodeOperands);
      break;
    }
    case InlineAsm::isClobber: {
      RegsForValue ClobberedRegs =
        GetRegistersForValue(ConstraintCode, MVT::Other, false, false,
                             OutputRegs, InputRegs);
      // Add the clobbered value to the operand list, so that the register
      // allocator is aware that the physreg got clobbered.
      if (!ClobberedRegs.Regs.empty())
        ClobberedRegs.AddInlineAsmOperands(2/*REGDEF*/, DAG, AsmNodeOperands);
      break;
    }
    }
  }

  // Finish up input operands.
  AsmNodeOperands[0] = Chain;
  if (Flag.Val) AsmNodeOperands.push_back(Flag);

  Chain = DAG.getNode(ISD::INLINEASM,
                      DAG.getNodeValueTypes(MVT::Other, MVT::Flag), 2,
                      &AsmNodeOperands[0], AsmNodeOperands.size());
  Flag = Chain.getValue(1);

  // If this asm returns a register value, copy the result from that register
  // and set it as the value of the call.
  if (!RetValRegs.Regs.empty())
    setValue(&I, RetValRegs.getCopyFromRegs(DAG, Chain, Flag));

  std::vector<std::pair<SDOperand, Value*> > StoresToEmit;

  // Process indirect outputs, first output all of the flagged copies out of
  // physregs.
  for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
    RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
    Value *Ptr = IndirectStoresToEmit[i].second;
    SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, Flag);
    StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
  }

  // Emit the non-flagged stores from the physregs.
  SmallVector<SDOperand, 8> OutChains;
  for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
    OutChains.push_back(DAG.getStore(Chain, StoresToEmit[i].first,
                                     getValue(StoresToEmit[i].second),
                                     StoresToEmit[i].second, 0));
  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &OutChains[0], OutChains.size());
  DAG.setRoot(Chain);
}


void SelectionDAGLowering::visitMalloc(MallocInst &I) {
  SDOperand Src = getValue(I.getOperand(0));

  MVT::ValueType IntPtr = TLI.getPointerTy();

  if (IntPtr < Src.getValueType())
    Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
  else if (IntPtr > Src.getValueType())
    Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);

  // Scale the source by the type size.
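  // For example, a malloc of n elements of an 8-byte type becomes a call to
  // malloc(n * 8); the multiply below computes that byte count.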
  uint64_t ElementSize = TD->getTypeSize(I.getType()->getElementType());
  Src = DAG.getNode(ISD::MUL, Src.getValueType(),
                    Src, getIntPtrConstant(ElementSize));

  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(Src, TLI.getTargetData()->getIntPtrType()));

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
                    DAG.getExternalSymbol("malloc", IntPtr),
                    Args, DAG);
  setValue(&I, Result.first);  // Pointers always fit in registers
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitFree(FreeInst &I) {
  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(getValue(I.getOperand(0)),
                                TLI.getTargetData()->getIntPtrType()));
  MVT::ValueType IntPtr = TLI.getPointerTy();
  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
                    DAG.getExternalSymbol("free", IntPtr), Args, DAG);
  DAG.setRoot(Result.second);
}

// InsertAtEndOfBasicBlock - This method should be implemented by targets that
// mark instructions with the 'usesCustomDAGSchedInserter' flag.  These
// instructions are special in various ways, which require special support to
// insert.  The specified MachineInstr is created but not inserted into any
// basic blocks, and the scheduler passes ownership of it to this method.
MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                                           MachineBasicBlock *MBB) {
  std::cerr << "If a target marks an instruction with "
               "'usesCustomDAGSchedInserter', it must implement "
               "TargetLowering::InsertAtEndOfBasicBlock!\n";
  abort();
  return 0;
}

void SelectionDAGLowering::visitVAStart(CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(),
                          getValue(I.getOperand(1)),
                          DAG.getSrcValue(I.getOperand(1))));
}

void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
  SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
                             getValue(I.getOperand(0)),
                             DAG.getSrcValue(I.getOperand(0)));
  setValue(&I, V);
  DAG.setRoot(V.getValue(1));
}

void SelectionDAGLowering::visitVAEnd(CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(),
                          getValue(I.getOperand(1)),
                          DAG.getSrcValue(I.getOperand(1))));
}

void SelectionDAGLowering::visitVACopy(CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(),
                          getValue(I.getOperand(1)),
                          getValue(I.getOperand(2)),
                          DAG.getSrcValue(I.getOperand(1)),
                          DAG.getSrcValue(I.getOperand(2))));
}

/// TargetLowering::LowerArguments - This is the default LowerArguments
/// implementation, which just inserts a FORMAL_ARGUMENTS node.  FIXME: When
/// all targets are migrated to using FORMAL_ARGUMENTS, this hook should be
/// integrated into SDISel.
std::vector<SDOperand>
TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
  // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
  std::vector<SDOperand> Ops;
  Ops.push_back(DAG.getRoot());
  Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
  Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));

  // Add one result value for each formal argument.
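  // For example, on a 32-bit target an i64 argument expands into two i32
  // result values, while a legal <4 x float> argument becomes a single v4f32
  // result value; the loop below computes this shape per argument.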
  std::vector<MVT::ValueType> RetVals;
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    MVT::ValueType VT = getValueType(I->getType());

    switch (getTypeAction(VT)) {
    default: assert(0 && "Unknown type action!");
    case Legal:
      RetVals.push_back(VT);
      break;
    case Promote:
      RetVals.push_back(getTypeToTransformTo(VT));
      break;
    case Expand:
      if (VT != MVT::Vector) {
        // If this is a large integer, it needs to be broken up into small
        // integers.  Figure out what the destination type is and how many
        // small integers it turns into.
        MVT::ValueType NVT = getTypeToTransformTo(VT);
        unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
        for (unsigned i = 0; i != NumVals; ++i)
          RetVals.push_back(NVT);
      } else {
        // Otherwise, this is a vector type.  We only support legal vectors
        // right now.
        unsigned NumElems = cast<PackedType>(I->getType())->getNumElements();
        const Type *EltTy = cast<PackedType>(I->getType())->getElementType();

        // Figure out if there is a Packed type corresponding to this Vector
        // type.  If so, convert to the packed type.
        MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
        if (TVT != MVT::Other && isTypeLegal(TVT)) {
          RetVals.push_back(TVT);
        } else {
          assert(0 && "Don't support illegal by-val vector arguments yet!");
        }
      }
      break;
    }
  }

  RetVals.push_back(MVT::Other);

  // Create the node.
  SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS,
                               DAG.getNodeValueTypes(RetVals), RetVals.size(),
                               &Ops[0], Ops.size()).Val;

  DAG.setRoot(SDOperand(Result, Result->getNumValues()-1));

  // Set up the return result vector.
  Ops.clear();
  unsigned i = 0;
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    MVT::ValueType VT = getValueType(I->getType());

    switch (getTypeAction(VT)) {
    default: assert(0 && "Unknown type action!");
    case Legal:
      Ops.push_back(SDOperand(Result, i++));
      break;
    case Promote: {
      SDOperand Op(Result, i++);
      if (MVT::isInteger(VT)) {
        unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext
                                                     : ISD::AssertZext;
        Op = DAG.getNode(AssertOp, Op.getValueType(), Op,
                         DAG.getValueType(VT));
        Op = DAG.getNode(ISD::TRUNCATE, VT, Op);
      } else {
        assert(MVT::isFloatingPoint(VT) && "Not int or FP?");
        Op = DAG.getNode(ISD::FP_ROUND, VT, Op);
      }
      Ops.push_back(Op);
      break;
    }
    case Expand:
      if (VT != MVT::Vector) {
        // If this is a large integer, it needs to be reassembled from small
        // integers.  Figure out what the source elt type is and how many
        // small integers it is.
        MVT::ValueType NVT = getTypeToTransformTo(VT);
        unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
        if (NumVals == 2) {
          SDOperand Lo = SDOperand(Result, i++);
          SDOperand Hi = SDOperand(Result, i++);

          if (!isLittleEndian())
            std::swap(Lo, Hi);

          Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi));
        } else {
          // Value scalarized into many values.  Unimp for now.
          assert(0 && "Cannot expand i64 -> i16 yet!");
        }
      } else {
        // Otherwise, this is a vector type.  We only support legal vectors
        // right now.
        const PackedType *PTy = cast<PackedType>(I->getType());
        unsigned NumElems = PTy->getNumElements();
        const Type *EltTy = PTy->getElementType();

        // Figure out if there is a Packed type corresponding to this Vector
        // type.  If so, convert to the packed type.
        MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
        if (TVT != MVT::Other && isTypeLegal(TVT)) {
          SDOperand N = SDOperand(Result, i++);
          // Handle copies from generic vectors to registers.
          N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
                          DAG.getConstant(NumElems, MVT::i32),
                          DAG.getValueType(getValueType(EltTy)));
          Ops.push_back(N);
        } else {
          assert(0 && "Don't support illegal by-val vector arguments yet!");
          abort();
        }
      }
      break;
    }
  }
  return Ops;
}


/// TargetLowering::LowerCallTo - This is the default LowerCallTo
/// implementation, which just inserts an ISD::CALL node, which is later custom
/// lowered by the target to something concrete.  FIXME: When all targets are
/// migrated to using ISD::CALL, this hook should be integrated into SDISel.
std::pair<SDOperand, SDOperand>
TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg,
                            unsigned CallingConv, bool isTailCall,
                            SDOperand Callee,
                            ArgListTy &Args, SelectionDAG &DAG) {
  SmallVector<SDOperand, 32> Ops;
  Ops.push_back(Chain);   // Op#0 - Chain
  Ops.push_back(DAG.getConstant(CallingConv, getPointerTy()));  // Op#1 - CC
  Ops.push_back(DAG.getConstant(isVarArg, getPointerTy()));     // Op#2 - VarArg
  Ops.push_back(DAG.getConstant(isTailCall, getPointerTy()));   // Op#3 - Tail
  Ops.push_back(Callee);

  // Handle all of the outgoing arguments.
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    MVT::ValueType VT = getValueType(Args[i].second);
    SDOperand Op = Args[i].first;
    bool isSigned = Args[i].second->isSigned();
    switch (getTypeAction(VT)) {
    default: assert(0 && "Unknown type action!");
    case Legal:
      Ops.push_back(Op);
      Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
      break;
    case Promote:
      if (MVT::isInteger(VT)) {
        unsigned ExtOp = isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
        Op = DAG.getNode(ExtOp, getTypeToTransformTo(VT), Op);
      } else {
        assert(MVT::isFloatingPoint(VT) && "Not int or FP?");
        Op = DAG.getNode(ISD::FP_EXTEND, getTypeToTransformTo(VT), Op);
      }
      Ops.push_back(Op);
      Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
      break;
    case Expand:
      if (VT != MVT::Vector) {
        // If this is a large integer, it needs to be broken down into small
        // integers.  Figure out what the source elt type is and how many
        // small integers it is.
        MVT::ValueType NVT = getTypeToTransformTo(VT);
        unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
        if (NumVals == 2) {
          SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Op,
                                     DAG.getConstant(0, getPointerTy()));
          SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Op,
                                     DAG.getConstant(1, getPointerTy()));
          if (!isLittleEndian())
            std::swap(Lo, Hi);

          Ops.push_back(Lo);
          Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
          Ops.push_back(Hi);
          Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
        } else {
          // Value scalarized into many values.  Unimp for now.
          assert(0 && "Cannot expand i64 -> i16 yet!");
        }
      } else {
        // Otherwise, this is a vector type.  We only support legal vectors
        // right now.
        const PackedType *PTy = cast<PackedType>(Args[i].second);
        unsigned NumElems = PTy->getNumElements();
        const Type *EltTy = PTy->getElementType();

        // Figure out if there is a Packed type corresponding to this Vector
        // type.  If so, convert to the packed type.
        MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
        if (TVT != MVT::Other && isTypeLegal(TVT)) {
          // Insert a VBIT_CONVERT of the MVT::Vector type to the packed type.
          Op = DAG.getNode(ISD::VBIT_CONVERT, TVT, Op);
          Ops.push_back(Op);
          Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
        } else {
          assert(0 && "Don't support illegal by-val vector call args yet!");
          abort();
        }
      }
      break;
    }
  }

  // Figure out the result value types.
  SmallVector<MVT::ValueType, 4> RetTys;

  if (RetTy != Type::VoidTy) {
    MVT::ValueType VT = getValueType(RetTy);
    switch (getTypeAction(VT)) {
    default: assert(0 && "Unknown type action!");
    case Legal:
      RetTys.push_back(VT);
      break;
    case Promote:
      RetTys.push_back(getTypeToTransformTo(VT));
      break;
    case Expand:
      if (VT != MVT::Vector) {
        // If this is a large integer, it needs to be reassembled from small
        // integers.  Figure out what the source elt type is and how many
        // small integers it is.
        MVT::ValueType NVT = getTypeToTransformTo(VT);
        unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
        for (unsigned i = 0; i != NumVals; ++i)
          RetTys.push_back(NVT);
      } else {
        // Otherwise, this is a vector type.  We only support legal vectors
        // right now.
        const PackedType *PTy = cast<PackedType>(RetTy);
        unsigned NumElems = PTy->getNumElements();
        const Type *EltTy = PTy->getElementType();

        // Figure out if there is a Packed type corresponding to this Vector
        // type.  If so, convert to the packed type.
        MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
        if (TVT != MVT::Other && isTypeLegal(TVT)) {
          RetTys.push_back(TVT);
        } else {
          assert(0 && "Don't support illegal by-val vector call results yet!");
          abort();
        }
      }
    }
  }

  RetTys.push_back(MVT::Other);  // Always has a chain.

  // Finally, create the CALL node.
  SDOperand Res = DAG.getNode(ISD::CALL,
                              DAG.getVTList(&RetTys[0], RetTys.size()),
                              &Ops[0], Ops.size());

  // This returns a pair of operands.  The first element is the
  // return value for the function (if RetTy is not VoidTy).  The second
  // element is the outgoing token chain.
  SDOperand ResVal;
  if (RetTys.size() != 1) {
    MVT::ValueType VT = getValueType(RetTy);
    if (RetTys.size() == 2) {
      ResVal = Res;

      // If this value was promoted, truncate it down.
      if (ResVal.getValueType() != VT) {
        if (VT == MVT::Vector) {
          // Insert a VBIT_CONVERT to convert from the packed result type to
          // the MVT::Vector type.
          unsigned NumElems = cast<PackedType>(RetTy)->getNumElements();
          const Type *EltTy = cast<PackedType>(RetTy)->getElementType();

          // Figure out if there is a Packed type corresponding to this Vector
          // type.  If so, convert to the packed type.
          MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy),
                                                  NumElems);
          if (TVT != MVT::Other && isTypeLegal(TVT)) {
            // Insert a VBIT_CONVERT of the call result to a
            // "N x PTyElementVT" MVT::Vector type.
            ResVal = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, ResVal,
                                 DAG.getConstant(NumElems, MVT::i32),
                                 DAG.getValueType(getValueType(EltTy)));
          } else {
            abort();
          }
        } else if (MVT::isInteger(VT)) {
          unsigned AssertOp = RetTy->isSigned() ?
                              ISD::AssertSext : ISD::AssertZext;
          ResVal = DAG.getNode(AssertOp, ResVal.getValueType(), ResVal,
                               DAG.getValueType(VT));
          ResVal = DAG.getNode(ISD::TRUNCATE, VT, ResVal);
        } else {
          assert(MVT::isFloatingPoint(VT));
          ResVal = DAG.getNode(ISD::FP_ROUND, VT, ResVal);
        }
      }
    } else if (RetTys.size() == 3) {
      ResVal = DAG.getNode(ISD::BUILD_PAIR, VT,
                           Res.getValue(0), Res.getValue(1));
    } else {
      assert(0 && "Case not handled yet!");
    }
  }

  return std::make_pair(ResVal, Res.getValue(Res.Val->getNumValues()-1));
}



// It is always conservatively correct for llvm.returnaddress and
// llvm.frameaddress to return 0.
//
// FIXME: Change this to insert a FRAMEADDR/RETURNADDR node, and have that be
// expanded to 0 if the target wants.
std::pair<SDOperand, SDOperand>
TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
                                        unsigned Depth, SelectionDAG &DAG) {
  return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
}

SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  assert(0 && "LowerOperation not implemented for this target!");
  abort();
  return SDOperand();
}

SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op,
                                                 SelectionDAG &DAG) {
  assert(0 && "CustomPromoteOperation not implemented for this target!");
  abort();
  return SDOperand();
}

void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
  unsigned Depth = (unsigned)cast<ConstantInt>(I.getOperand(1))->getZExtValue();
  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
  setValue(&I, Result.first);
  DAG.setRoot(Result.second);
}

/// getMemsetValue - Vectorized representation of the memset value
/// operand.
static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
                                SelectionDAG &DAG) {
  MVT::ValueType CurVT = VT;
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
    uint64_t Val = C->getValue() & 255;
    unsigned Shift = 8;
    while (CurVT != MVT::i8) {
      Val = (Val << Shift) | Val;
      Shift <<= 1;
      CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
    }
    return DAG.getConstant(Val, VT);
  } else {
    Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value);
    unsigned Shift = 8;
    while (CurVT != MVT::i8) {
      Value =
        DAG.getNode(ISD::OR, VT,
                    DAG.getNode(ISD::SHL, VT, Value,
                                DAG.getConstant(Shift, MVT::i8)), Value);
      Shift <<= 1;
      CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
    }

    return Value;
  }
}

/// getMemsetStringVal - Similar to getMemsetValue.  Except this is only
/// used when a memcpy is turned into a memset when the source is a constant
/// string ptr.
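/// For example, gathering the bytes "abcd" at Offset 0 into an i32 on a
/// little-endian target walks the string backwards ('d','c','b','a') and
/// produces the constant 0x64636261, whose in-memory image is the string.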
static SDOperand getMemsetStringVal(MVT::ValueType VT,
                                    SelectionDAG &DAG, TargetLowering &TLI,
                                    std::string &Str, unsigned Offset) {
  MVT::ValueType CurVT = VT;
  uint64_t Val = 0;
  unsigned MSB = getSizeInBits(VT) / 8;
  if (TLI.isLittleEndian())
    Offset = Offset + MSB - 1;
  for (unsigned i = 0; i != MSB; ++i) {
    Val = (Val << 8) | Str[Offset];
    Offset += TLI.isLittleEndian() ? -1 : 1;
  }
  return DAG.getConstant(Val, VT);
}

/// getMemBasePlusOffset - Returns the base pointer plus the given constant
/// offset, as a single ADD node.
static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset,
                                      SelectionDAG &DAG, TargetLowering &TLI) {
  MVT::ValueType VT = Base.getValueType();
  return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT));
}

/// MeetsMaxMemopRequirement - Determines if the number of memory ops required
/// to replace the memset / memcpy is below the threshold.  It also returns the
/// types of the sequence of memory ops to perform memset / memcpy.
static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
                                     unsigned Limit, uint64_t Size,
                                     unsigned Align, TargetLowering &TLI) {
  MVT::ValueType VT;

  if (TLI.allowsUnalignedMemoryAccesses()) {
    VT = MVT::i64;
  } else {
    switch (Align & 7) {
    case 0:
      VT = MVT::i64;
      break;
    case 4:
      VT = MVT::i32;
      break;
    case 2:
      VT = MVT::i16;
      break;
    default:
      VT = MVT::i8;
      break;
    }
  }

  MVT::ValueType LVT = MVT::i64;
  while (!TLI.isTypeLegal(LVT))
    LVT = (MVT::ValueType)((unsigned)LVT - 1);
  assert(MVT::isInteger(LVT));

  if (VT > LVT)
    VT = LVT;

  unsigned NumMemOps = 0;
  while (Size != 0) {
    unsigned VTSize = getSizeInBits(VT) / 8;
    while (VTSize > Size) {
      VT = (MVT::ValueType)((unsigned)VT - 1);
      VTSize >>= 1;
    }
    assert(MVT::isInteger(VT));

    if (++NumMemOps > Limit)
      return false;
    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}

void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) {
  SDOperand Op1 = getValue(I.getOperand(1));
  SDOperand Op2 = getValue(I.getOperand(2));
  SDOperand Op3 = getValue(I.getOperand(3));
  SDOperand Op4 = getValue(I.getOperand(4));
  unsigned Align = (unsigned)cast<ConstantSDNode>(Op4)->getValue();
  if (Align == 0) Align = 1;

  if (ConstantSDNode *Size = dyn_cast<ConstantSDNode>(Op3)) {
    std::vector<MVT::ValueType> MemOps;

    // Expand memset / memcpy to a series of load / store ops
    // if the size operand falls below a certain threshold.
    SmallVector<SDOperand, 8> OutChains;
    switch (Op) {
    default: break;  // Do nothing for now.
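    // For example, a 7-byte memset with 4-byte alignment expands below into
    // an i32 store, an i16 store, and an i8 store (assuming those types are
    // legal), rather than a call to the memset library function.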
    case ISD::MEMSET: {
      if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(),
                                   Size->getValue(), Align, TLI)) {
        unsigned NumMemOps = MemOps.size();
        unsigned Offset = 0;
        for (unsigned i = 0; i < NumMemOps; i++) {
          MVT::ValueType VT = MemOps[i];
          unsigned VTSize = getSizeInBits(VT) / 8;
          SDOperand Value = getMemsetValue(Op2, VT, DAG);
          SDOperand Store =
            DAG.getStore(getRoot(), Value,
                         getMemBasePlusOffset(Op1, Offset, DAG, TLI),
                         I.getOperand(1), Offset);
          OutChains.push_back(Store);
          Offset += VTSize;
        }
      }
      break;
    }
    case ISD::MEMCPY: {
      if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemcpy(),
                                   Size->getValue(), Align, TLI)) {
        unsigned NumMemOps = MemOps.size();
        unsigned SrcOff = 0, DstOff = 0, SrcDelta = 0;
        GlobalAddressSDNode *G = NULL;
        std::string Str;
        bool CopyFromStr = false;

        if (Op2.getOpcode() == ISD::GlobalAddress)
          G = cast<GlobalAddressSDNode>(Op2);
        else if (Op2.getOpcode() == ISD::ADD &&
                 Op2.getOperand(0).getOpcode() == ISD::GlobalAddress &&
                 Op2.getOperand(1).getOpcode() == ISD::Constant) {
          G = cast<GlobalAddressSDNode>(Op2.getOperand(0));
          SrcDelta = cast<ConstantSDNode>(Op2.getOperand(1))->getValue();
        }
        if (G) {
          GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
          if (GV) {
            Str = GV->getStringValue(false);
            if (!Str.empty()) {
              CopyFromStr = true;
              SrcOff += SrcDelta;
            }
          }
        }

        for (unsigned i = 0; i < NumMemOps; i++) {
          MVT::ValueType VT = MemOps[i];
          unsigned VTSize = getSizeInBits(VT) / 8;
          SDOperand Value, Chain, Store;

          if (CopyFromStr) {
            Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff);
            Chain = getRoot();
            Store =
              DAG.getStore(Chain, Value,
                           getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
                           I.getOperand(1), DstOff);
          } else {
            Value = DAG.getLoad(VT, getRoot(),
                                getMemBasePlusOffset(Op2, SrcOff, DAG, TLI),
                                I.getOperand(2), SrcOff);
            Chain = Value.getValue(1);
            Store =
              DAG.getStore(Chain, Value,
                           getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
                           I.getOperand(1), DstOff);
          }
          OutChains.push_back(Store);
          SrcOff += VTSize;
          DstOff += VTSize;
        }
      }
      break;
    }
    }

    if (!OutChains.empty()) {
      DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other,
                              &OutChains[0], OutChains.size()));
      return;
    }
  }

  DAG.setRoot(DAG.getNode(Op, MVT::Other, getRoot(), Op1, Op2, Op3, Op4));
}

//===----------------------------------------------------------------------===//
// SelectionDAGISel code
//===----------------------------------------------------------------------===//

unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
  return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
}

void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
  // FIXME: we only modify the CFG to split critical edges.  This
  // updates dom and loop info.
  AU.addRequired<AliasAnalysis>();
}


/// OptimizeNoopCopyExpression - We have determined that the specified cast
/// instruction is a noop copy (e.g. it's casting from one pointer type to
/// another, int->uint, or int->sbyte on PPC).
///
/// Return true if any changes are made.
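/// For example (illustrative IR), if "%c = cast int* %p to uint*" in BB1 is
/// used only by a load in BB2, an equivalent cast is materialized in BB2 and
/// the load rewritten to use it, keeping the copy local to the block that
/// can fold it away during selection.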
static bool OptimizeNoopCopyExpression(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  std::map<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in.  For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      unsigned OpVal = UI.getOperandNo()/2;
      UserBB = PN->getIncomingBlock(OpVal);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->begin();
      while (isa<PHINode>(InsertPt)) ++InsertPt;

      InsertedCast =
        new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
      MadeChange = true;
    }

    // Replace this use of the cast with a use of the new cast.
    TheUse = InsertedCast;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty())
    CI->eraseFromParent();

  return MadeChange;
}

/// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
/// casting to the type of GEPI.
static Instruction *InsertGEPComputeCode(Instruction *&V, BasicBlock *BB,
                                         Instruction *GEPI, Value *Ptr,
                                         Value *PtrOffset) {
  if (V) return V;   // Already computed.

  BasicBlock::iterator InsertPt;
  if (BB == GEPI->getParent()) {
    // If inserting into the GEP's block, insert right after the GEP.
    InsertPt = GEPI;
    ++InsertPt;
  } else {
    // Otherwise, insert at the top of BB, after any PHI nodes.
    InsertPt = BB->begin();
    while (isa<PHINode>(InsertPt)) ++InsertPt;
  }

  // If Ptr is itself a cast, but in some other BB, emit a copy of the cast
  // into BB so that there is only one value live across basic blocks (the
  // cast operand).
  if (CastInst *CI = dyn_cast<CastInst>(Ptr))
    if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
      Ptr = new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);

  // Add the offset, cast it to the right type.
  Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
  return V = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
}

/// ReplaceUsesOfGEPInst - Replace all uses of RepPtr with inserted code to
/// compute its value.  The RepPtr value can be computed with Ptr+PtrOffset.
/// One trivial way of doing this would be to evaluate Ptr+PtrOffset in
/// RepPtr's block, then ReplaceAllUsesWith'ing everything.  However, we would
/// prefer to sink PtrOffset into user blocks where doing so will likely allow
/// us to fold the constant add into a load or store instruction.
/// Additionally, if a user is a pointer-pointer cast, we look through it to
/// find its users.
static void ReplaceUsesOfGEPInst(Instruction *RepPtr, Value *Ptr,
                                 Constant *PtrOffset, BasicBlock *DefBB,
                                 GetElementPtrInst *GEPI,
                          std::map<BasicBlock*,Instruction*> &InsertedExprs) {
  while (!RepPtr->use_empty()) {
    Instruction *User = cast<Instruction>(RepPtr->use_back());

    // If the user is a Pointer-Pointer cast, recurse.
    if (isa<CastInst>(User) && isa<PointerType>(User->getType())) {
      ReplaceUsesOfGEPInst(User, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);

      // Drop the use of RepPtr.  The cast is dead.  Don't delete it now, else
      // we could invalidate an iterator.
      User->setOperand(0, UndefValue::get(RepPtr->getType()));
      continue;
    }

    // If this is a load of the pointer, or a store through the pointer, emit
    // the increment into the load/store block.
    Instruction *NewVal;
    if (isa<LoadInst>(User) ||
        (isa<StoreInst>(User) && User->getOperand(0) != RepPtr)) {
      NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
                                    User->getParent(), GEPI,
                                    Ptr, PtrOffset);
    } else {
      // If this use is not foldable into the addressing mode, use a version
      // emitted in the GEP block.
      NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
                                    Ptr, PtrOffset);
    }

    if (GEPI->getType() != RepPtr->getType()) {
      BasicBlock::iterator IP = NewVal;
      ++IP;
      NewVal = new CastInst(NewVal, RepPtr->getType(), "", IP);
    }
    User->replaceUsesOfWith(RepPtr, NewVal);
  }
}


/// OptimizeGEPExpression - Since we are doing basic-block-at-a-time
/// instruction selection, we want to be a bit careful about some things.  In
/// particular, if we have a GEP instruction that is used in a different block
/// than it is defined, the addressing expression of the GEP cannot be folded
/// into loads or stores that use it.  In this case, decompose the GEP and
/// move constant indices into blocks that use it.
static bool OptimizeGEPExpression(GetElementPtrInst *GEPI,
                                  const TargetData *TD) {
  // If this GEP is only used inside the block it is defined in, there is no
  // need to rewrite it.
  bool isUsedOutsideDefBB = false;
  BasicBlock *DefBB = GEPI->getParent();
  for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end();
       UI != E; ++UI) {
    if (cast<Instruction>(*UI)->getParent() != DefBB) {
      isUsedOutsideDefBB = true;
      break;
    }
  }
  if (!isUsedOutsideDefBB) return false;

  // If this GEP has no non-zero constant indices, there is nothing we can do;
  // ignore it.
  bool hasConstantIndex = false;
  bool hasVariableIndex = false;
  for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
       E = GEPI->op_end(); OI != E; ++OI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI)) {
      if (CI->getZExtValue()) {
        hasConstantIndex = true;
        break;
      }
    } else {
      hasVariableIndex = true;
    }
  }

  // If this is a "GEP X, 0, 0, 0", turn this into a cast.
  if (!hasConstantIndex && !hasVariableIndex) {
    Value *NC = new CastInst(GEPI->getOperand(0), GEPI->getType(),
                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NC);
    GEPI->eraseFromParent();
    return true;
  }

  // If this is a "GEP &Alloca, 0, 0", forward subst the frame index into uses.
  if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0)))
    return false;

  // Otherwise, decompose the GEP instruction into multiplies and adds.  Sum
  // the constant offset (which we now know is non-zero) and deal with it
  // later.
  uint64_t ConstantOffset = 0;
  const Type *UIntPtrTy = TD->getIntPtrType();
  Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
  const Type *Ty = GEPI->getOperand(0)->getType();

  for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
       E = GEPI->op_end(); OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field)
        ConstantOffset += TD->getStructLayout(StTy)->MemberOffsets[Field];
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // Handle constant subscripts.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getZExtValue() == 0) continue;
        if (CI->getType()->isSigned())
          ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CI->getSExtValue();
        else
          ConstantOffset += TD->getTypeSize(Ty)*CI->getZExtValue();
        continue;
      }

      // Ptr = Ptr + Idx * ElementSize;

      // Cast Idx to UIntPtrTy if needed.
      Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);

      uint64_t ElementSize = TD->getTypeSize(Ty);
      // Mask off bits that should not be set.
      ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
      Constant *SizeCst = ConstantInt::get(UIntPtrTy, ElementSize);

      // Multiply by the element size and add to the base.
      Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
      Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);
    }
  }

  // Make sure that the offset fits in uintptr_t.
  ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
  Constant *PtrOffset = ConstantInt::get(UIntPtrTy, ConstantOffset);

  // Okay, we have now emitted all of the variable index parts to the BB that
  // the GEP is defined in.  Loop over all of the using instructions, inserting
  // an "add Ptr, ConstantOffset" into each block that uses it and updating the
  // instruction to use the newly computed value, making GEPI dead.  When the
  // user is a load or store instruction address, we emit the add into the user
  // block, otherwise we use a canonical version right next to the gep (these
  // won't be foldable as addresses, so we might as well share the computation).

  std::map<BasicBlock*,Instruction*> InsertedExprs;
  ReplaceUsesOfGEPInst(GEPI, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);

  // Finally, the GEP is dead, remove it.
  GEPI->eraseFromParent();

  return true;
}

/// SplitCritEdgesForPHIConstants - If this block has any PHI nodes with
/// constant operands, and if any of the edges feeding the PHI node are
/// critical, split them so that the assignments of a constant to a register
/// will not be executed on a path that isn't relevant.
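///
/// For example (illustrative CFG, hypothetical labels): if %merge contains
/// "phi int [ 7, %a ], [ %x, %b ]" and %a's terminator has other successors,
/// the %a -> %merge edge is critical.  Splitting it gives the "7 -> vreg"
/// copy a block of its own, so the copy is not executed when control leaves
/// %a along its other edges.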
void SelectionDAGISel::SplitCritEdgesForPHIConstants(BasicBlock *BB) {
  // The most common case is that this is a PHI node with two incoming values;
  // handle this case efficiently, because it is simple.
  PHINode *PN = cast<PHINode>(BB->begin());
  if (PN->getNumIncomingValues() == 2) {
    // If neither edge is critical, we never need to split.
    if (PN->getIncomingBlock(0)->getTerminator()->getNumSuccessors() == 1 &&
        PN->getIncomingBlock(1)->getTerminator()->getNumSuccessors() == 1)
      return;

    BasicBlock::iterator BBI = BB->begin();
    while ((PN = dyn_cast<PHINode>(BBI++))) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        if (isa<Constant>(PN->getIncomingValue(i)))
          SplitCriticalEdge(PN->getIncomingBlock(i), BB);
    }
    return;
  }

  // Otherwise, things are a bit trickier.

  // BE SMART HERE.

  BasicBlock::iterator BBI = BB->begin();
  while ((PN = dyn_cast<PHINode>(BBI++))) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (isa<Constant>(PN->getIncomingValue(i)))
        SplitCriticalEdge(PN->getIncomingBlock(i), BB);
  }
}


bool SelectionDAGISel::runOnFunction(Function &Fn) {
  MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
  RegMap = MF.getSSARegMap();
  DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");

  // First, split all critical edges for PHI nodes with incoming values that
  // are constants; this way the load of the constant into a vreg will not be
  // placed into MBBs that are used some other way.
  //
  // In this pass we also look for GEP and cast instructions that are used
  // across basic blocks and rewrite them to improve basic-block-at-a-time
  // selection.
  //
  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
      // If this block has any PHI nodes with constant operands, and if any of
      // the edges feeding the PHI node are critical, split them.
      if (isa<PHINode>(BB->begin()))
        SplitCritEdgesForPHIConstants(BB);

      for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
        Instruction *I = BBI++;
        if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
          MadeChange |= OptimizeGEPExpression(GEPI, TLI.getTargetData());
        } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
          // If the source of the cast is a constant, then this should have
          // already been constant folded.  The only reason NOT to constant
          // fold it is if something (e.g. LSR) was careful to place the
          // constant evaluation in a block other than the one that uses it
          // (e.g. to hoist the address of globals out of a loop).  If this is
          // the case, we don't want to forward-subst the cast.
          if (isa<Constant>(CI->getOperand(0)))
            continue;

          // If this is a noop copy, sink it into user blocks to reduce the
          // number of virtual registers that must be created and coalesced.
          MVT::ValueType SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
          MVT::ValueType DstVT = TLI.getValueType(CI->getType());

          // Is this an fp<->int conversion?
          if (MVT::isInteger(SrcVT) != MVT::isInteger(DstVT))
            continue;

          // If this is an extension, it will be a zero or sign extension,
          // which isn't a noop.
          if (SrcVT < DstVT) continue;

          // If these values will be promoted, find out what they will be
          // promoted to.  This helps us consider truncates on PPC as noop
          // copies when they are.
          if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
            SrcVT = TLI.getTypeToTransformTo(SrcVT);
          if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
            DstVT = TLI.getTypeToTransformTo(DstVT);

          // If, after promotion, these are the same types, this is a noop
          // copy.
          if (SrcVT == DstVT)
            MadeChange |= OptimizeNoopCopyExpression(CI);
        }
      }
    }
  }

  FunctionLoweringInfo FuncInfo(TLI, Fn, MF);

  for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
    SelectBasicBlock(I, MF, FuncInfo);

  return true;
}


SDOperand SelectionDAGISel::
CopyValueToVirtualRegister(SelectionDAGLowering &SDL, Value *V, unsigned Reg) {
  SDOperand Op = SDL.getValue(V);
  assert((Op.getOpcode() != ISD::CopyFromReg ||
          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
         "Copy from a reg to the same reg!");

  // If this type is not legal, we must make sure to not create an invalid
  // register use.
  MVT::ValueType SrcVT = Op.getValueType();
  MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT);
  SelectionDAG &DAG = SDL.DAG;
  if (SrcVT == DestVT) {
    return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
  } else if (SrcVT == MVT::Vector) {
    // Handle copies from generic vectors to registers.
    MVT::ValueType PTyElementVT, PTyLegalElementVT;
    unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(V->getType()),
                                             PTyElementVT, PTyLegalElementVT);

    // Insert a VBIT_CONVERT of the input vector to a "N x PTyElementVT"
    // MVT::Vector type.
    Op = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Op,
                     DAG.getConstant(NE, MVT::i32),
                     DAG.getValueType(PTyElementVT));

    // Loop over all of the elements of the resultant vector,
    // VEXTRACT_VECTOR_ELT'ing them, converting them to PTyLegalElementVT, then
    // copying them into output registers.
    SmallVector<SDOperand, 8> OutChains;
    SDOperand Root = SDL.getRoot();
    for (unsigned i = 0; i != NE; ++i) {
      SDOperand Elt = DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, PTyElementVT,
                                  Op, DAG.getConstant(i, TLI.getPointerTy()));
      if (PTyElementVT == PTyLegalElementVT) {
        // Elements are legal.
        OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
      } else if (PTyLegalElementVT > PTyElementVT) {
        // Elements are promoted.
        if (MVT::isFloatingPoint(PTyLegalElementVT))
          Elt = DAG.getNode(ISD::FP_EXTEND, PTyLegalElementVT, Elt);
        else
          Elt = DAG.getNode(ISD::ANY_EXTEND, PTyLegalElementVT, Elt);
        OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
      } else {
        // Elements are expanded.
        // The src value is expanded into multiple registers.
        SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
                                   Elt, DAG.getConstant(0, TLI.getPointerTy()));
        SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
                                   Elt, DAG.getConstant(1, TLI.getPointerTy()));
        OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Lo));
        OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Hi));
      }
    }
    return DAG.getNode(ISD::TokenFactor, MVT::Other,
                       &OutChains[0], OutChains.size());
  } else if (SrcVT < DestVT) {
    // The src value is promoted to the register.
    if (MVT::isFloatingPoint(SrcVT))
      Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op);
    else
      Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op);
    return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
  } else {
    // The src value is expanded into multiple registers.
    SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
                               Op, DAG.getConstant(0, TLI.getPointerTy()));
    SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
                               Op, DAG.getConstant(1, TLI.getPointerTy()));
    Op = DAG.getCopyToReg(SDL.getRoot(), Reg, Lo);
    return DAG.getCopyToReg(Op, Reg+1, Hi);
  }
}
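// Worked example for the expand case above (illustrative, assuming a 32-bit
// target where i64 is illegal): copying an i64 value V to registers Reg and
// Reg+1 emits
//   Lo = EXTRACT_ELEMENT(V, 0)  -->  CopyToReg(Reg)
//   Hi = EXTRACT_ELEMENT(V, 1)  -->  CopyToReg(Reg+1)
// and blocks that read the value back typically reassemble it from the two
// i32 halves.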
void SelectionDAGISel::
LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL,
               std::vector<SDOperand> &UnorderedChains) {
  // If this is the entry block, emit arguments.
  Function &F = *BB->getParent();
  FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
  SDOperand OldRoot = SDL.DAG.getRoot();
  std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);

  unsigned a = 0;
  for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
       AI != E; ++AI, ++a)
    if (!AI->use_empty()) {
      SDL.setValue(AI, Args[a]);

      // If this argument is live outside of the entry block, insert a copy
      // from wherever we got it to the vreg that other BB's will reference
      // it as.
      if (FuncInfo.ValueMap.count(AI)) {
        SDOperand Copy =
          CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
        UnorderedChains.push_back(Copy);
      }
    }

  // Finally, if the target has anything special to do, allow it to do so.
  // FIXME: this should insert code into the DAG!
  EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction());
}
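// For instance (illustrative): if argument %x of the function is also used
// outside the entry block, %x has an entry in FuncInfo.ValueMap, so the code
// above emits a copy of the incoming argument value into that vreg; other
// blocks then read %x from the vreg rather than reaching into the entry
// block's DAG.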
void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
       std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
                                         FunctionLoweringInfo &FuncInfo) {
  SelectionDAGLowering SDL(DAG, TLI, FuncInfo);

  std::vector<SDOperand> UnorderedChains;

  // Lower any arguments needed in this block if this is the entry block.
  if (LLVMBB == &LLVMBB->getParent()->front())
    LowerArguments(LLVMBB, SDL, UnorderedChains);

  BB = FuncInfo.MBBMap[LLVMBB];
  SDL.setCurrentBasicBlock(BB);

  // Lower all of the non-terminator instructions.
  for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
       I != E; ++I)
    SDL.visit(*I);

  // Ensure that all instructions which are used outside of their defining
  // blocks are available as virtual registers.
  for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E; ++I)
    if (!I->use_empty() && !isa<PHINode>(I)) {
      std::map<const Value*, unsigned>::iterator VMI =
        FuncInfo.ValueMap.find(I);
      if (VMI != FuncInfo.ValueMap.end())
        UnorderedChains.push_back(
                                CopyValueToVirtualRegister(SDL, I, VMI->second));
    }

  // Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
  // ensure constants are generated when needed.  Remember the virtual
  // registers that need to be added to the Machine PHI nodes as input.  We
  // cannot just directly add them, because expansion might result in multiple
  // MBB's for one BB.  As such, the start of the BB might correspond to a
  // different MBB than the end.
  //

  // Emit constants only once even if used by multiple PHI nodes.
  std::map<Constant*, unsigned> ConstantsOut;

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  TerminatorInst *TI = LLVMBB->getTerminator();
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;

    MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin();
    PHINode *PN;

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not
    // been emitted yet.
    for (BasicBlock::iterator I = SuccBB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        unsigned Reg;
        Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
        if (Constant *C = dyn_cast<Constant>(PHIOp)) {
          unsigned &RegOut = ConstantsOut[C];
          if (RegOut == 0) {
            RegOut = FuncInfo.CreateRegForValue(C);
            UnorderedChains.push_back(
                                 CopyValueToVirtualRegister(SDL, C, RegOut));
          }
          Reg = RegOut;
        } else {
          Reg = FuncInfo.ValueMap[PHIOp];
          if (Reg == 0) {
            assert(isa<AllocaInst>(PHIOp) &&
                   FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
                   "Didn't codegen value into a register!??");
            Reg = FuncInfo.CreateRegForValue(PHIOp);
            UnorderedChains.push_back(
                                 CopyValueToVirtualRegister(SDL, PHIOp, Reg));
          }
        }

        // Remember that this register needs to be added to the machine PHI
        // node as the input for this MBB.
        MVT::ValueType VT = TLI.getValueType(PN->getType());
        unsigned NumElements;
        if (VT != MVT::Vector)
          NumElements = TLI.getNumElements(VT);
        else {
          MVT::ValueType VT1, VT2;
          NumElements =
            TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
                                       VT1, VT2);
        }
        for (unsigned i = 0, e = NumElements; i != e; ++i)
          PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
      }
  }
  ConstantsOut.clear();

  // Turn all of the unordered chains into one factored node.
  if (!UnorderedChains.empty()) {
    SDOperand Root = SDL.getRoot();
    if (Root.getOpcode() != ISD::EntryToken) {
      unsigned i = 0, e = UnorderedChains.size();
      for (; i != e; ++i) {
        assert(UnorderedChains[i].Val->getNumOperands() > 1);
        if (UnorderedChains[i].Val->getOperand(0) == Root)
          break;  // Don't add the root if we already indirectly depend on it.
      }

      if (i == e)
        UnorderedChains.push_back(Root);
    }
    DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other,
                            &UnorderedChains[0], UnorderedChains.size()));
  }

  // Lower the terminator after the copies are emitted.
  SDL.visit(*LLVMBB->getTerminator());

  // Copy over any CaseBlock records that may now exist due to SwitchInst
  // lowering, as well as any jump table information.
  SwitchCases.clear();
  SwitchCases = SDL.SwitchCases;
  JT = SDL.JT;

  // Make sure the root of the DAG is up-to-date.
  DAG.setRoot(SDL.getRoot());
}
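// Summary of the per-block pipeline implemented below (descriptive comment
// only): combine (pre-legalize) -> legalize -> combine (post-legalize) ->
// instruction selection via InstructionSelectBasicBlock, which is expected
// to schedule and emit the machine code (see ScheduleAndEmitDAG below).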
void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) {
  // Get alias analysis for load/store combining.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Run the DAG combiner in pre-legalize mode.
  DAG.Combine(false, AA);

  DEBUG(std::cerr << "Lowered selection DAG:\n");
  DEBUG(DAG.dump());

  // Second step, hack on the DAG until it only uses operations and types that
  // the target supports.
  DAG.Legalize();

  DEBUG(std::cerr << "Legalized selection DAG:\n");
  DEBUG(DAG.dump());

  // Run the DAG combiner in post-legalize mode.
  DAG.Combine(true, AA);

  if (ViewISelDAGs) DAG.viewGraph();

  // Third, instruction select all of the operations to machine code, adding
  // the code to the MachineBasicBlock.
  InstructionSelectBasicBlock(DAG);

  DEBUG(std::cerr << "Selected machine code:\n");
  DEBUG(BB->dump());
}

void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
                                        FunctionLoweringInfo &FuncInfo) {
  std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
  {
    SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
    CurDAG = &DAG;

    // First step, lower LLVM code to some DAG.  This DAG may use operations
    // and types that are not supported by the target.
    BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);

    // Second step, emit the lowered DAG as machine code.
    CodeGenAndEmitDAG(DAG);
  }

  // Next, now that we know what the last MBB the LLVM BB expanded is, update
  // PHI nodes in successors.
  if (SwitchCases.empty() && JT.Reg == 0) {
    for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
      MachineInstr *PHI = PHINodesToUpdate[i].first;
      assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
             "This is not a machine PHI node that we are updating!");
      PHI->addRegOperand(PHINodesToUpdate[i].second, false);
      PHI->addMachineBasicBlockOperand(BB);
    }
    return;
  }

  // If the JumpTable record is filled in, then we need to emit a jump table.
  // Updating the PHI nodes is tricky in this case, since we need to determine
  // whether the PHI is a successor of the range check MBB or the jump table
  // MBB.
  if (JT.Reg) {
    assert(SwitchCases.empty() && "Cannot have jump table and lowered switch");
    SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
    CurDAG = &SDAG;
    SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
    MachineBasicBlock *RangeBB = BB;
    // Set the current basic block to the mbb we wish to insert the code into.
    BB = JT.MBB;
    SDL.setCurrentBasicBlock(BB);
    // Emit the code.
    SDL.visitJumpTable(JT);
    SDAG.setRoot(SDL.getRoot());
    CodeGenAndEmitDAG(SDAG);
    // Update the PHI nodes.
    for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) {
      MachineInstr *PHI = PHINodesToUpdate[pi].first;
      MachineBasicBlock *PHIBB = PHI->getParent();
      assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
             "This is not a machine PHI node that we are updating!");
      if (PHIBB == JT.Default) {
        PHI->addRegOperand(PHINodesToUpdate[pi].second, false);
        PHI->addMachineBasicBlockOperand(RangeBB);
      }
      if (BB->succ_end() != std::find(BB->succ_begin(), BB->succ_end(),
                                      PHIBB)) {
        PHI->addRegOperand(PHINodesToUpdate[pi].second, false);
        PHI->addMachineBasicBlockOperand(BB);
      }
    }
    return;
  }
  // If the switch block involved a branch to one of the actual successors, we
  // need to update PHI nodes in that block.
  for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
    MachineInstr *PHI = PHINodesToUpdate[i].first;
    assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
           "This is not a machine PHI node that we are updating!");
    if (BB->isSuccessor(PHI->getParent())) {
      PHI->addRegOperand(PHINodesToUpdate[i].second, false);
      PHI->addMachineBasicBlockOperand(BB);
    }
  }

  // If we generated any switch lowering information, build and codegen any
  // additional DAGs necessary.
  for (unsigned i = 0, e = SwitchCases.size(); i != e; ++i) {
    SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
    CurDAG = &SDAG;
    SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);

    // Set the current basic block to the mbb we wish to insert the code into.
    BB = SwitchCases[i].ThisBB;
    SDL.setCurrentBasicBlock(BB);

    // Emit the code.
    SDL.visitSwitchCase(SwitchCases[i]);
    SDAG.setRoot(SDL.getRoot());
    CodeGenAndEmitDAG(SDAG);

    // Handle any PHI nodes in successors of this chunk, as if we were coming
    // from the original BB before switch expansion.  Note that PHI nodes can
    // occur multiple times in PHINodesToUpdate.  We have to be very careful
    // to handle them the right number of times.
    while ((BB = SwitchCases[i].TrueBB)) {  // Handle LHS and RHS.
      for (MachineBasicBlock::iterator Phi = BB->begin();
           Phi != BB->end() && Phi->getOpcode() == TargetInstrInfo::PHI;
           ++Phi) {
        // The value for this PHI node is recorded in PHINodesToUpdate; get it.
        for (unsigned pn = 0; ; ++pn) {
          assert(pn != PHINodesToUpdate.size() && "Didn't find PHI entry!");
          if (PHINodesToUpdate[pn].first == Phi) {
            Phi->addRegOperand(PHINodesToUpdate[pn].second, false);
            Phi->addMachineBasicBlockOperand(SwitchCases[i].ThisBB);
            break;
          }
        }
      }

      // Don't process the RHS if it is the same block as the LHS.
      if (BB == SwitchCases[i].FalseBB)
        SwitchCases[i].FalseBB = 0;

      // If we haven't handled the RHS, do so now.  Otherwise, we're done.
      SwitchCases[i].TrueBB = SwitchCases[i].FalseBB;
      SwitchCases[i].FalseBB = 0;
    }
    assert(SwitchCases[i].TrueBB == 0 && SwitchCases[i].FalseBB == 0);
  }
}


//===----------------------------------------------------------------------===//
/// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
/// target node in the graph.
void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) {
  if (ViewSchedDAGs) DAG.viewGraph();

  RegisterScheduler::FunctionPassCtor Ctor = RegisterScheduler::getDefault();

  if (!Ctor) {
    Ctor = ISHeuristic;
    RegisterScheduler::setDefault(Ctor);
  }

  ScheduleDAG *SL = Ctor(this, &DAG, BB);
  BB = SL->Run();
  delete SL;
}


HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() {
  return new HazardRecognizer();
}

//===----------------------------------------------------------------------===//
// Helper functions used by the generated instruction selector.
//===----------------------------------------------------------------------===//
// Calls to these methods are generated by tblgen.

/// CheckAndMask - The isel is trying to match something like (and X, 255).
/// If the dag combiner simplified the 255, we still want to match.  RHS is
/// the actual value in the DAG on the RHS of an AND, and DesiredMaskS is the
/// value specified in the .td file (e.g. 255).
bool SelectionDAGISel::CheckAndMask(SDOperand LHS, ConstantSDNode *RHS,
                                    int64_t DesiredMaskS) {
  uint64_t ActualMask = RHS->getValue();
  uint64_t DesiredMask =
    DesiredMaskS & MVT::getIntVTBitMask(LHS.getValueType());

  // If the actual mask exactly matches, success!
  if (ActualMask == DesiredMask)
    return true;

  // If the actual AND mask is allowing unallowed bits, this doesn't match.
  if (ActualMask & ~DesiredMask)
    return false;

  // Otherwise, the DAG Combiner may have proven that the value coming in is
  // either already zero or is not demanded.  Check for known zero input bits.
  uint64_t NeededMask = DesiredMask & ~ActualMask;
  if (getTargetLowering().MaskedValueIsZero(LHS, NeededMask))
    return true;

  // TODO: check to see if the missing bits are just not demanded.

  // Otherwise, this pattern doesn't match.
  return false;
}
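// Worked example for the mask helpers above and below (illustrative):
// matching (and X, 255) after the combiner shrank the mask to 15 gives
//   DesiredMask = 0xff, ActualMask = 0x0f, NeededMask = 0xf0.
// CheckAndMask then matches only if bits 0xf0 of X are known zero, and
// CheckOrMask (for the analogous or-pattern) only if they are known one.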
/// CheckOrMask - The isel is trying to match something like (or X, 255).  If
/// the dag combiner simplified the 255, we still want to match.  RHS is the
/// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
/// specified in the .td file (e.g. 255).
bool SelectionDAGISel::CheckOrMask(SDOperand LHS, ConstantSDNode *RHS,
                                   int64_t DesiredMaskS) {
  uint64_t ActualMask = RHS->getValue();
  uint64_t DesiredMask =
    DesiredMaskS & MVT::getIntVTBitMask(LHS.getValueType());

  // If the actual mask exactly matches, success!
  if (ActualMask == DesiredMask)
    return true;

  // If the actual OR mask is allowing unallowed bits, this doesn't match.
  if (ActualMask & ~DesiredMask)
    return false;

  // Otherwise, the DAG Combiner may have proven that the value coming in is
  // either already zero or is not demanded.  Check for known one input bits.
  uint64_t NeededMask = DesiredMask & ~ActualMask;

  uint64_t KnownZero, KnownOne;
  getTargetLowering().ComputeMaskedBits(LHS, NeededMask, KnownZero, KnownOne);

  // If all the missing bits in the or are already known to be set, match!
  if ((NeededMask & KnownOne) == NeededMask)
    return true;

  // TODO: check to see if the missing bits are just not demanded.

  // Otherwise, this pattern doesn't match.
  return false;
}


/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
/// by tblgen.  Others should not call it.
void SelectionDAGISel::
SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) {
  std::vector<SDOperand> InOps;
  std::swap(InOps, Ops);

  Ops.push_back(InOps[0]);  // input chain.
  Ops.push_back(InOps[1]);  // input asm string.

  unsigned i = 2, e = InOps.size();
  if (InOps[e-1].getValueType() == MVT::Flag)
    --e;  // Don't process a flag operand if it is here.

  while (i != e) {
    unsigned Flags = cast<ConstantSDNode>(InOps[i])->getValue();
    if ((Flags & 7) != 4 /*MEM*/) {
      // Just skip over this operand, copying the operands verbatim.
      Ops.insert(Ops.end(), InOps.begin()+i, InOps.begin()+i+(Flags >> 3) + 1);
      i += (Flags >> 3) + 1;
    } else {
      assert((Flags >> 3) == 1 && "Memory operand with multiple values?");
      // Otherwise, this is a memory operand.  Ask the target to select it.
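      // Note on the flag word decoded above (explanatory comment only): the
      // low 3 bits encode the operand kind (4 == MEM) and the remaining bits
      // the number of SDOperands that follow, so a one-value memory operand
      // arrives as 4 | (1 << 3).  The constant pushed below re-encodes it as
      // 4 | (SelOps.size() << 3) for the selected address.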
      std::vector<SDOperand> SelOps;
      if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) {
        std::cerr << "Could not match memory address.  Inline asm failure!\n";
        exit(1);
      }

      // Add this to the output node.
      Ops.push_back(DAG.getConstant(4/*MEM*/ | (SelOps.size() << 3),
                                    MVT::i32));
      Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
      i += 2;
    }
  }

  // Add the flag input back if present.
  if (e != InOps.size())
    Ops.push_back(InOps.back());
}