SelectionDAGISel.cpp revision 6b6b6ef1677fa71b1072c2911b4c1f9524a558c9
//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/MachineDebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include <algorithm>
using namespace llvm;

#ifndef NDEBUG
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
          cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
          cl::desc("Pop up a window to show sched dags as they are processed"));
#else
static const bool ViewISelDAGs = 0, ViewSchedDAGs = 0;
#endif


//===---------------------------------------------------------------------===//
///
/// RegisterScheduler class - Track the registration of instruction schedulers.
///
//===---------------------------------------------------------------------===//
MachinePassRegistry RegisterScheduler::Registry;

//===---------------------------------------------------------------------===//
///
/// ISHeuristic command line option for instruction schedulers.
///
//===---------------------------------------------------------------------===//
namespace {
  cl::opt<RegisterScheduler::FunctionPassCtor, false,
          RegisterPassParser<RegisterScheduler> >
  ISHeuristic("sched",
              cl::init(&createDefaultScheduler),
              cl::desc("Instruction schedulers available:"));

  static RegisterScheduler
  defaultListDAGScheduler("default", " Best scheduler for the target",
                          createDefaultScheduler);
} // namespace

namespace {
  /// RegsForValue - This struct represents the physical registers that a
  /// particular value is assigned and the type information about the value.
  /// This is needed because values can be promoted into larger registers and
  /// expanded into multiple smaller registers than the value.
  struct VISIBILITY_HIDDEN RegsForValue {
    /// Regs - This list holds the register (for legal and promoted values)
    /// or register set (for expanded values) that the value should be assigned
    /// to.
    std::vector<unsigned> Regs;

    /// RegVT - The value type of each register.
    ///
    MVT::ValueType RegVT;

    /// ValueVT - The value type of the LLVM value, which may be promoted from
    /// RegVT or made from merging the two expanded parts.
    MVT::ValueType ValueVT;

    RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {}

    RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt)
      : RegVT(regvt), ValueVT(valuevt) {
      Regs.push_back(Reg);
    }
    RegsForValue(const std::vector<unsigned> &regs,
                 MVT::ValueType regvt, MVT::ValueType valuevt)
      : Regs(regs), RegVT(regvt), ValueVT(valuevt) {
    }

    /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
    /// this value and returns the result as a ValueVT value.  This uses
    /// Chain/Flag as the input and updates them for the output Chain/Flag.
    SDOperand getCopyFromRegs(SelectionDAG &DAG,
                              SDOperand &Chain, SDOperand &Flag) const;

    /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
    /// specified value into the registers specified by this object.  This uses
    /// Chain/Flag as the input and updates them for the output Chain/Flag.
    void getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
                       SDOperand &Chain, SDOperand &Flag,
                       MVT::ValueType PtrVT) const;

    /// AddInlineAsmOperands - Add this value to the specified inlineasm node
    /// operand list.  This adds the code marker and includes the number of
    /// values added into it.
    void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
                              std::vector<SDOperand> &Ops) const;
  };
}

namespace llvm {
  //===--------------------------------------------------------------------===//
  /// createDefaultScheduler - This creates an instruction scheduler appropriate
  /// for the target.
  ScheduleDAG* createDefaultScheduler(SelectionDAGISel *IS,
                                      SelectionDAG *DAG,
                                      MachineBasicBlock *BB) {
    TargetLowering &TLI = IS->getTargetLowering();

    if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency) {
      return createTDListDAGScheduler(IS, DAG, BB);
    } else {
      assert(TLI.getSchedulingPreference() ==
             TargetLowering::SchedulingForRegPressure && "Unknown sched type!");
      return createBURRListDAGScheduler(IS, DAG, BB);
    }
  }


  //===--------------------------------------------------------------------===//
  /// FunctionLoweringInfo - This contains information that is global to a
  /// function that is used when lowering a region of the function.
  class FunctionLoweringInfo {
  public:
    TargetLowering &TLI;
    Function &Fn;
    MachineFunction &MF;
    SSARegMap *RegMap;

    FunctionLoweringInfo(TargetLowering &TLI, Function &Fn,MachineFunction &MF);

    /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
    std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;

    /// ValueMap - Since we emit code for the function a basic block at a time,
    /// we must remember which virtual registers hold the values for
    /// cross-basic-block values.
    std::map<const Value*, unsigned> ValueMap;

    /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
    /// the entry block.  This allows the allocas to be efficiently referenced
    /// anywhere in the function.
    std::map<const AllocaInst*, int> StaticAllocaMap;

    unsigned MakeReg(MVT::ValueType VT) {
      return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
    }

    /// isExportedInst - Return true if the specified value is an instruction
    /// exported from its block.
    bool isExportedInst(const Value *V) {
      return ValueMap.count(V);
    }

    unsigned CreateRegForValue(const Value *V);

    unsigned InitializeRegForValue(const Value *V) {
      unsigned &R = ValueMap[V];
      assert(R == 0 && "Already initialized this value register!");
      return R = CreateRegForValue(V);
    }
  };
}

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) ||
        // FIXME: Remove switchinst special case.
        isa<SwitchInst>(*UI))
      return true;
  return false;
}

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true.  This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(Argument *A) {
  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
      return false;  // Use not in entry block.
  return true;
}

FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
                                           Function &fn, MachineFunction &mf)
    : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers.  This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::iterator BB = Fn.begin(), EB = Fn.end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
                   AI->getAlignment());

        // If the alignment of the value is smaller than the size of the
        // value, and if the size of the value is particularly small
        // (<= 8 bytes), round up to the size of the value for potentially
        // better performance.
        //
        // FIXME: This could be made better with a preferred alignment hook in
        // TargetData.  It serves primarily to 8-byte align doubles for X86.
        if (Align < TySize && TySize <= 8) Align = TySize;
        TySize *= CUI->getZExtValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
      }

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = new MachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF.getBasicBlockList().push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    for (BasicBlock::iterator I = BB->begin();(PN = dyn_cast<PHINode>(I)); ++I){
      if (PN->use_empty()) continue;

      MVT::ValueType VT = TLI.getValueType(PN->getType());
      unsigned NumElements;
      if (VT != MVT::Vector)
        NumElements = TLI.getNumElements(VT);
      else {
        MVT::ValueType VT1,VT2;
        NumElements =
          TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
                                     VT1, VT2);
      }
      unsigned PHIReg = ValueMap[PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");
      const TargetInstrInfo *TII = TLI.getTargetMachine().getInstrInfo();
      for (unsigned i = 0; i != NumElements; ++i)
        BuildMI(MBB, TII->get(TargetInstrInfo::PHI), PHIReg+i);
    }
  }
}

/// CreateRegForValue - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types.  Assign these registers
/// consecutive vreg numbers and return the first assigned number.
unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
  MVT::ValueType VT = TLI.getValueType(V->getType());

  // The number of multiples of registers that we need, to, e.g., split up
  // a <2 x int64> -> 4 x i32 registers.
  unsigned NumVectorRegs = 1;

  // If this is a packed type, figure out what type it will decompose into
  // and how many of the elements it will use.
  if (VT == MVT::Vector) {
    const PackedType *PTy = cast<PackedType>(V->getType());
    unsigned NumElts = PTy->getNumElements();
    MVT::ValueType EltTy = TLI.getValueType(PTy->getElementType());

    // Divide the input until we get to a supported size.  This will always
    // end with a scalar if the target doesn't support vectors.
    while (NumElts > 1 && !TLI.isTypeLegal(getVectorType(EltTy, NumElts))) {
      NumElts >>= 1;
      NumVectorRegs <<= 1;
    }
    if (NumElts == 1)
      VT = EltTy;
    else
      VT = getVectorType(EltTy, NumElts);
  }

  // The common case is that we will only create one register for this
  // value.  If we have that case, create and return the virtual register.
  unsigned NV = TLI.getNumElements(VT);
  if (NV == 1) {
    // If we are promoting this value, pick the next largest supported type.
    MVT::ValueType PromotedType = TLI.getTypeToTransformTo(VT);
    unsigned Reg = MakeReg(PromotedType);
    // If this is a vector of supported or promoted types (e.g. 4 x i16),
    // create all of the registers.
    for (unsigned i = 1; i != NumVectorRegs; ++i)
      MakeReg(PromotedType);
    return Reg;
  }

  // If this value is represented with multiple target registers, make sure
  // to create enough consecutive registers of the right (smaller) type.
  VT = TLI.getTypeToExpandTo(VT);
  unsigned R = MakeReg(VT);
  for (unsigned i = 1; i != NV*NumVectorRegs; ++i)
    MakeReg(VT);
  return R;
}

//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
/// Also, targets can overload any lowering method.
///
namespace llvm {
class SelectionDAGLowering {
  MachineBasicBlock *CurMBB;

  std::map<const Value*, SDOperand> NodeMap;

  /// PendingLoads - Loads are not emitted to the program immediately.  We bunch
  /// them up and then emit token factor nodes when possible.  This allows us to
  /// get simple disambiguation between loads without worrying about alias
  /// analysis.
  std::vector<SDOperand> PendingLoads;

  /// Case - A pair of values to record the Value for a switch case, and the
  /// case's target basic block.
  typedef std::pair<Constant*, MachineBasicBlock*> Case;
  typedef std::vector<Case>::iterator CaseItr;
  typedef std::pair<CaseItr, CaseItr> CaseRange;

  /// CaseRec - A struct with ctor used in lowering switches to a binary tree
  /// of conditional branches.
  struct CaseRec {
    CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) :
    CaseBB(bb), LT(lt), GE(ge), Range(r) {}

    /// CaseBB - The MBB in which to emit the compare and branch
    MachineBasicBlock *CaseBB;
    /// LT, GE - If nonzero, we know the current case value must be less-than or
    /// greater-than-or-equal-to these Constants.
    Constant *LT;
    Constant *GE;
    /// Range - A pair of iterators representing the range of case values to be
    /// processed at this point in the binary search tree.
    CaseRange Range;
  };

  /// The comparison function for sorting Case values.
  struct CaseCmp {
    bool operator () (const Case& C1, const Case& C2) {
      assert(isa<ConstantInt>(C1.first) && isa<ConstantInt>(C2.first));
      return cast<const ConstantInt>(C1.first)->getSExtValue() <
        cast<const ConstantInt>(C2.first)->getSExtValue();
    }
  };

public:
  // TLI - This is information that describes the available target features we
  // need for lowering.  This indicates when operations are unavailable,
  // implemented with a libcall, etc.
  TargetLowering &TLI;
  SelectionDAG &DAG;
  const TargetData *TD;

  /// SwitchCases - Vector of CaseBlock structures used to communicate
  /// SwitchInst code generation information.
  std::vector<SelectionDAGISel::CaseBlock> SwitchCases;
  SelectionDAGISel::JumpTable JT;

  /// FuncInfo - Information about the function as a whole.
  ///
  FunctionLoweringInfo &FuncInfo;

  SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
                       FunctionLoweringInfo &funcinfo)
    : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
      JT(0,0,0,0), FuncInfo(funcinfo) {
  }

  /// getRoot - Return the current virtual root of the Selection DAG.
  ///
  SDOperand getRoot() {
    if (PendingLoads.empty())
      return DAG.getRoot();

    if (PendingLoads.size() == 1) {
      SDOperand Root = PendingLoads[0];
      DAG.setRoot(Root);
      PendingLoads.clear();
      return Root;
    }

    // Otherwise, we have to make a token factor node.
    SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                                 &PendingLoads[0], PendingLoads.size());
    PendingLoads.clear();
    DAG.setRoot(Root);
    return Root;
  }

  SDOperand CopyValueToVirtualRegister(Value *V, unsigned Reg);

  void visit(Instruction &I) { visit(I.getOpcode(), I); }

  void visit(unsigned Opcode, User &I) {
    // Note: this doesn't use InstVisitor, because it has to work with
    // ConstantExpr's in addition to instructions.
    switch (Opcode) {
    default: assert(0 && "Unknown instruction type encountered!");
             abort();
      // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
    }
  }

  void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }

  SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr,
                        const Value *SV, SDOperand Root,
                        bool isVolatile);

  SDOperand getIntPtrConstant(uint64_t Val) {
    return DAG.getConstant(Val, TLI.getPointerTy());
  }

  SDOperand getValue(const Value *V);

  const SDOperand &setValue(const Value *V, SDOperand NewN) {
    SDOperand &N = NodeMap[V];
    assert(N.Val == 0 && "Already set a value for this node!");
    return N = NewN;
  }

  RegsForValue GetRegistersForValue(const std::string &ConstrCode,
                                    MVT::ValueType VT,
                                    bool OutReg, bool InReg,
                                    std::set<unsigned> &OutputRegs,
                                    std::set<unsigned> &InputRegs);

  void FindMergedConditions(Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            unsigned Opc);
  bool isExportableFromCurrentBlock(Value *V, const BasicBlock *FromBB);
  void ExportFromCurrentBlock(Value *V);

  // Terminator instructions.
  void visitRet(ReturnInst &I);
  void visitBr(BranchInst &I);
  void visitSwitch(SwitchInst &I);
  void visitUnreachable(UnreachableInst &I) { /* noop */ }

  // Helper for visitSwitch
  void visitSwitchCase(SelectionDAGISel::CaseBlock &CB);
  void visitJumpTable(SelectionDAGISel::JumpTable &JT);

  // These all get lowered before this pass.
  void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
  void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }

  void visitIntBinary(User &I, unsigned IntOp, unsigned VecOp);
  void visitFPBinary(User &I, unsigned FPOp, unsigned VecOp);
  void visitShift(User &I, unsigned Opcode);
  void visitAdd(User &I) {
    if (I.getType()->isFloatingPoint())
      visitFPBinary(I, ISD::FADD, ISD::VADD);
    else
      visitIntBinary(I, ISD::ADD, ISD::VADD);
  }
  void visitSub(User &I);
  void visitMul(User &I) {
    if (I.getType()->isFloatingPoint())
      visitFPBinary(I, ISD::FMUL, ISD::VMUL);
    else
      visitIntBinary(I, ISD::MUL, ISD::VMUL);
  }
  void visitURem(User &I) { visitIntBinary(I, ISD::UREM, 0); }
  void visitSRem(User &I) { visitIntBinary(I, ISD::SREM, 0); }
  void visitFRem(User &I) { visitFPBinary (I, ISD::FREM, 0); }
  void visitUDiv(User &I) { visitIntBinary(I, ISD::UDIV, ISD::VUDIV); }
  void visitSDiv(User &I) { visitIntBinary(I, ISD::SDIV, ISD::VSDIV); }
  void visitFDiv(User &I) { visitFPBinary (I, ISD::FDIV, ISD::VSDIV); }
  void visitAnd(User &I) { visitIntBinary(I, ISD::AND, ISD::VAND); }
  void visitOr (User &I) { visitIntBinary(I, ISD::OR,  ISD::VOR); }
  void visitXor(User &I) { visitIntBinary(I, ISD::XOR, ISD::VXOR); }
  void visitShl(User &I) { visitShift(I, ISD::SHL); }
  void visitLShr(User &I) { visitShift(I, ISD::SRL); }
  void visitAShr(User &I) { visitShift(I, ISD::SRA); }
  void visitICmp(User &I);
  void visitFCmp(User &I);
  // Visit the conversion instructions
  void visitTrunc(User &I);
  void visitZExt(User &I);
  void visitSExt(User &I);
  void visitFPTrunc(User &I);
  void visitFPExt(User &I);
  void visitFPToUI(User &I);
  void visitFPToSI(User &I);
  void visitUIToFP(User &I);
  void visitSIToFP(User &I);
  void visitPtrToInt(User &I);
  void visitIntToPtr(User &I);
  void visitBitCast(User &I);

  void visitExtractElement(User &I);
  void visitInsertElement(User &I);
  void visitShuffleVector(User &I);

  void visitGetElementPtr(User &I);
  void visitSelect(User &I);

  void visitMalloc(MallocInst &I);
  void visitFree(FreeInst &I);
  void visitAlloca(AllocaInst &I);
  void visitLoad(LoadInst &I);
  void visitStore(StoreInst &I);
  void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
  void visitCall(CallInst &I);
  void visitInlineAsm(CallInst &I);
  const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic);

  void visitVAStart(CallInst &I);
  void visitVAArg(VAArgInst &I);
  void visitVAEnd(CallInst &I);
  void visitVACopy(CallInst &I);
  void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);

  void visitMemIntrinsic(CallInst &I, unsigned Op);

  void visitUserOp1(Instruction &I) {
    assert(0 && "UserOp1 should not exist at instruction selection time!");
    abort();
  }
  void visitUserOp2(Instruction &I) {
    assert(0 && "UserOp2 should not exist at instruction selection time!");
    abort();
  }
};
} // end namespace llvm

SDOperand SelectionDAGLowering::getValue(const Value *V) {
  SDOperand &N = NodeMap[V];
  if (N.Val) return N;

  const Type *VTy = V->getType();
  MVT::ValueType VT = TLI.getValueType(VTy);
  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      assert(N.Val && "visit didn't populate the ValueMap!");
      return N;
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
      return N = DAG.getGlobalAddress(GV, VT);
    } else if (isa<ConstantPointerNull>(C)) {
      return N = DAG.getConstant(0, TLI.getPointerTy());
    } else if (isa<UndefValue>(C)) {
      if (!isa<PackedType>(VTy))
        return N = DAG.getNode(ISD::UNDEF, VT);

      // Create a VBUILD_VECTOR of undef nodes.
      const PackedType *PTy = cast<PackedType>(VTy);
      unsigned NumElements = PTy->getNumElements();
      MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());

      SmallVector<SDOperand, 8> Ops;
      Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT));

      // Create a VConstant node with generic Vector type.
      Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
      Ops.push_back(DAG.getValueType(PVT));
      return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector,
                             &Ops[0], Ops.size());
    } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
      return N = DAG.getConstantFP(CFP->getValue(), VT);
    } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
      unsigned NumElements = PTy->getNumElements();
      MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());

      // Now that we know the number and type of the elements, push a
      // Constant or ConstantFP node onto the ops list for each element of
      // the packed constant.
      SmallVector<SDOperand, 8> Ops;
      if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
        for (unsigned i = 0; i != NumElements; ++i)
          Ops.push_back(getValue(CP->getOperand(i)));
      } else {
        assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
        SDOperand Op;
        if (MVT::isFloatingPoint(PVT))
          Op = DAG.getConstantFP(0, PVT);
        else
          Op = DAG.getConstant(0, PVT);
        Ops.assign(NumElements, Op);
      }

      // Create a VBUILD_VECTOR node with generic Vector type.
      Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
      Ops.push_back(DAG.getValueType(PVT));
      return N = DAG.getNode(ISD::VBUILD_VECTOR,MVT::Vector,&Ops[0],Ops.size());
    } else {
      // Canonicalize all constant ints to be unsigned.
      return N = DAG.getConstant(cast<ConstantInt>(C)->getZExtValue(),VT);
    }
  }

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    std::map<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
  }

  std::map<const Value*, unsigned>::const_iterator VMI =
      FuncInfo.ValueMap.find(V);
  assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");

  unsigned InReg = VMI->second;

  // If this type is not legal, make it so now.
  if (VT != MVT::Vector) {
    if (TLI.getTypeAction(VT) == TargetLowering::Expand) {
      // Source must be expanded.  This input value is actually coming from the
      // register pair VMI->second and VMI->second+1.
      MVT::ValueType DestVT = TLI.getTypeToExpandTo(VT);
      unsigned NumVals = TLI.getNumElements(VT);
      N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
      if (NumVals == 1)
        N = DAG.getNode(ISD::BIT_CONVERT, VT, N);
      else {
        assert(NumVals == 2 && "1 to 4 (and more) expansion not implemented!");
        N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
                       DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
      }
    } else {
      MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);
      N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
      if (TLI.getTypeAction(VT) == TargetLowering::Promote) // Promotion case
        N = MVT::isFloatingPoint(VT)
          ? DAG.getNode(ISD::FP_ROUND, VT, N)
          : DAG.getNode(ISD::TRUNCATE, VT, N);
    }
  } else {
    // Otherwise, if this is a vector, make it available as a generic vector
    // here.
    MVT::ValueType PTyElementVT, PTyLegalElementVT;
    const PackedType *PTy = cast<PackedType>(VTy);
    unsigned NE = TLI.getPackedTypeBreakdown(PTy, PTyElementVT,
                                             PTyLegalElementVT);

    // Build a VBUILD_VECTOR with the input registers.
    SmallVector<SDOperand, 8> Ops;
    if (PTyElementVT == PTyLegalElementVT) {
      // If the value types are legal, just VBUILD the CopyFromReg nodes.
      for (unsigned i = 0; i != NE; ++i)
        Ops.push_back(DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                         PTyElementVT));
    } else if (PTyElementVT < PTyLegalElementVT) {
      // If the register was promoted, use TRUNCATE or FP_ROUND as appropriate.
      for (unsigned i = 0; i != NE; ++i) {
        SDOperand Op = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                          PTyElementVT);
        if (MVT::isFloatingPoint(PTyElementVT))
          Op = DAG.getNode(ISD::FP_ROUND, PTyElementVT, Op);
        else
          Op = DAG.getNode(ISD::TRUNCATE, PTyElementVT, Op);
        Ops.push_back(Op);
      }
    } else {
      // If the register was expanded, use BUILD_PAIR.
      assert((NE & 1) == 0 && "Must expand into a multiple of 2 elements!");
      for (unsigned i = 0; i != NE/2; ++i) {
        SDOperand Op0 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                           PTyElementVT);
        SDOperand Op1 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                           PTyElementVT);
        Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Op0, Op1));
      }
    }

    Ops.push_back(DAG.getConstant(NE, MVT::i32));
    Ops.push_back(DAG.getValueType(PTyLegalElementVT));
    N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, &Ops[0], Ops.size());

    // Finally, use a VBIT_CONVERT to make this available as the appropriate
    // vector type.
    N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
                    DAG.getConstant(PTy->getNumElements(),
                                    MVT::i32),
                    DAG.getValueType(TLI.getValueType(PTy->getElementType())));
  }

  return N;
}


void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
    return;
  }
  SmallVector<SDOperand, 8> NewValues;
  NewValues.push_back(getRoot());
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    SDOperand RetOp = getValue(I.getOperand(i));

    // If this is an integer return value, we need to promote it ourselves to
    // the full width of a register, since LegalizeOp will use ANY_EXTEND rather
    // than sign/zero.
    // FIXME: C calling convention requires the return type to be promoted to
    // at least 32-bit. But this is not necessary for non-C calling conventions.
    if (MVT::isInteger(RetOp.getValueType()) &&
        RetOp.getValueType() < MVT::i64) {
      MVT::ValueType TmpVT;
      if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
        TmpVT = TLI.getTypeToTransformTo(MVT::i32);
      else
        TmpVT = MVT::i32;
      const FunctionType *FTy = I.getParent()->getParent()->getFunctionType();
      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
      if (FTy->paramHasAttr(0, FunctionType::SExtAttribute))
        ExtendKind = ISD::SIGN_EXTEND;
      if (FTy->paramHasAttr(0, FunctionType::ZExtAttribute))
        ExtendKind = ISD::ZERO_EXTEND;
      RetOp = DAG.getNode(ExtendKind, TmpVT, RetOp);
    }
    NewValues.push_back(RetOp);
    NewValues.push_back(DAG.getConstant(false, MVT::i32));
  }
  DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other,
                          &NewValues[0], NewValues.size()));
}

/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg.
void SelectionDAGLowering::ExportFromCurrentBlock(Value *V) {
  // No need to export constants.
  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;

  // Already exported?
  if (FuncInfo.isExportedInst(V)) return;

  unsigned Reg = FuncInfo.InitializeRegForValue(V);
  PendingLoads.push_back(CopyValueToVirtualRegister(V, Reg));
}

bool SelectionDAGLowering::isExportableFromCurrentBlock(Value *V,
                                                    const BasicBlock *FromBB) {
  // The operands of the setcc have to be in this block.  We don't know
  // how to export them from some other block.
  if (Instruction *VI = dyn_cast<Instruction>(V)) {
    // Can export from current BB.
    if (VI->getParent() == FromBB)
      return true;

    // Is already exported, noop.
    return FuncInfo.isExportedInst(V);
  }

  // If this is an argument, we can export it if the BB is the entry block or
  // if it is already exported.
  if (isa<Argument>(V)) {
    if (FromBB == &FromBB->getParent()->getEntryBlock())
      return true;

    // Otherwise, can only export this if it is already exported.
    return FuncInfo.isExportedInst(V);
  }

  // Otherwise, constants can always be exported.
  return true;
}

static bool InBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

/// FindMergedConditions - If Cond is an expression like
void SelectionDAGLowering::FindMergedConditions(Value *Cond,
                                                MachineBasicBlock *TBB,
                                                MachineBasicBlock *FBB,
                                                MachineBasicBlock *CurBB,
                                                unsigned Opc) {
  // If this node is not part of the or/and tree, emit it as a branch.
  Instruction *BOp = dyn_cast<Instruction>(Cond);

  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
      (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    const BasicBlock *BB = CurBB->getBasicBlock();

    // If the leaf of the tree is a comparison, merge the condition into
    // the caseblock.
    if ((isa<ICmpInst>(Cond) || isa<FCmpInst>(Cond)) &&
        // The operands of the cmp have to be in this block.  We don't know
        // how to export them from some other block.  If this is the first block
        // of the sequence, no exporting is needed.
        (CurBB == CurMBB ||
         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
          isExportableFromCurrentBlock(BOp->getOperand(1), BB)))) {
      BOp = cast<Instruction>(Cond);
      ISD::CondCode Condition;
      if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
        switch (IC->getPredicate()) {
        default: assert(0 && "Unknown icmp predicate opcode!");
        case ICmpInst::ICMP_EQ:  Condition = ISD::SETEQ;  break;
        case ICmpInst::ICMP_NE:  Condition = ISD::SETNE;  break;
        case ICmpInst::ICMP_SLE: Condition = ISD::SETLE;  break;
        case ICmpInst::ICMP_ULE: Condition = ISD::SETULE; break;
        case ICmpInst::ICMP_SGE: Condition = ISD::SETGE;  break;
        case ICmpInst::ICMP_UGE: Condition = ISD::SETUGE; break;
        case ICmpInst::ICMP_SLT: Condition = ISD::SETLT;  break;
        case ICmpInst::ICMP_ULT: Condition = ISD::SETULT; break;
        case ICmpInst::ICMP_SGT: Condition = ISD::SETGT;  break;
        case ICmpInst::ICMP_UGT: Condition = ISD::SETUGT; break;
        }
      } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
        ISD::CondCode FPC, FOC;
        switch (FC->getPredicate()) {
        default: assert(0 && "Unknown fcmp predicate opcode!");
        case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
        case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
        case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
        case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
        case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
        case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
        case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
        case FCmpInst::FCMP_ORD:   FOC = ISD::SETEQ; FPC = ISD::SETO;   break;
        case FCmpInst::FCMP_UNO:   FOC = ISD::SETNE; FPC = ISD::SETUO;  break;
        case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
        case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
        case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
        case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
        case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
        case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
        case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
        }
        if (FiniteOnlyFPMath())
          Condition = FOC;
        else
          Condition = FPC;
      } else {
        assert(0 && "Unknown compare instruction");
      }

      SelectionDAGISel::CaseBlock CB(Condition, BOp->getOperand(0),
                                     BOp->getOperand(1), TBB, FBB, CurBB);
      SwitchCases.push_back(CB);
      return;
    }

    // Create a CaseBlock record representing this branch.
    SelectionDAGISel::CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(),
                                   TBB, FBB, CurBB);
    SwitchCases.push_back(CB);
    return;
  }


  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI = CurBB;
  MachineBasicBlock *TmpBB = new MachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->getBasicBlockList().insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    //  This requires creation of TmpBB after CurBB.

    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
  }
}

/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
static bool
ShouldEmitAsBranches(const std::vector<SelectionDAGISel::CaseBlock> &Cases) {
  if (Cases.size() != 2) return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  return true;
}

void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(Succ0MBB)));

    // Update machine-CFG edges.
    CurMBB->addSuccessor(Succ0MBB);

    return;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  Value *CondVal = I.getCondition();
  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  //
  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
    if (BOp->hasOneUse() &&
        (BOp->getOpcode() == Instruction::And ||
         BOp->getOpcode() == Instruction::Or)) {
      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
      // If the compares in later blocks need to use values not currently
      // exported from this block, export them now.  This block should always
      // be the first entry.
      assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (ShouldEmitAsBranches(SwitchCases)) {
        for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
          ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
          ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
        }

        // Emit the branch for this block.
        visitSwitchCase(SwitchCases[0]);
        SwitchCases.erase(SwitchCases.begin());
        return;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
        CurMBB->getParent()->getBasicBlockList().erase(SwitchCases[i].ThisBB);

      SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  SelectionDAGISel::CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(),
                                 Succ0MBB, Succ1MBB, CurMBB);
  // Use visitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  visitSwitchCase(CB);
}

/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) {
  SDOperand Cond;
  SDOperand CondLHS = getValue(CB.CmpLHS);

  // Build the setcc now, fold "(X == true)" to X and "(X == false)" to !X to
  // handle common cases produced by branch lowering.
  if (CB.CmpRHS == ConstantInt::getTrue() && CB.CC == ISD::SETEQ)
    Cond = CondLHS;
  else if (CB.CmpRHS == ConstantInt::getFalse() && CB.CC == ISD::SETEQ) {
    SDOperand True = DAG.getConstant(1, CondLHS.getValueType());
    Cond = DAG.getNode(ISD::XOR, CondLHS.getValueType(), CondLHS, True);
  } else
    Cond = DAG.getSetCC(MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == NextBlock) {
    std::swap(CB.TrueBB, CB.FalseBB);
    SDOperand True = DAG.getConstant(1, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
  }
  SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
                                 DAG.getBasicBlock(CB.TrueBB));
  if (CB.FalseBB == NextBlock)
    DAG.setRoot(BrCond);
  else
    DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond,
                            DAG.getBasicBlock(CB.FalseBB)));
  // Update successor info
  CurMBB->addSuccessor(CB.TrueBB);
  CurMBB->addSuccessor(CB.FalseBB);
}

void SelectionDAGLowering::visitJumpTable(SelectionDAGISel::JumpTable &JT) {
  // Emit the code for the jump table
  MVT::ValueType PTy = TLI.getPointerTy();
  SDOperand Index = DAG.getCopyFromReg(getRoot(), JT.Reg, PTy);
  SDOperand Table = DAG.getJumpTable(JT.JTI, PTy);
  DAG.setRoot(DAG.getNode(ISD::BR_JT, MVT::Other, Index.getValue(1),
                          Table, Index));
  return;
}

void SelectionDAGLowering::visitSwitch(SwitchInst &I) {
  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;

  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  MachineBasicBlock *Default = FuncInfo.MBBMap[I.getDefaultDest()];

  // If there is only the default destination, branch to it if it is not the
  // next basic block.  Otherwise, just fall through.
  if (I.getNumOperands() == 2) {
    // Update machine-CFG edges.

    // If this is not a fall-through branch, emit the branch.
    if (Default != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(Default)));

    CurMBB->addSuccessor(Default);
    return;
  }

  // If there are any non-default case statements, create a vector of Cases
  // representing each one, and sort the vector so that we can efficiently
  // create a binary search tree from them.
  std::vector<Case> Cases;

  for (unsigned i = 1; i < I.getNumSuccessors(); ++i) {
    MachineBasicBlock *SMBB = FuncInfo.MBBMap[I.getSuccessor(i)];
    Cases.push_back(Case(I.getSuccessorValue(i), SMBB));
  }

  std::sort(Cases.begin(), Cases.end(), CaseCmp());

  // Get the Value to be switched on and default basic blocks, which will be
  // inserted into CaseBlock records, representing basic blocks in the binary
  // search tree.
  Value *SV = I.getOperand(0);

  // Get the MachineFunction which holds the current MBB.  This is used during
  // emission of jump tables, and when inserting any additional MBBs necessary
  // to represent the switch.
  MachineFunction *CurMF = CurMBB->getParent();
  const BasicBlock *LLVMBB = CurMBB->getBasicBlock();

  // If the switch has few cases (two or less) emit a series of specific
  // tests.
  if (Cases.size() < 3) {
    // TODO: If any two of the cases has the same destination, and if one value
    // is the same as the other, but has one bit unset that the other has set,
    // use bit manipulation to do two compares at once.  For example:
    // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"

    // Rearrange the case blocks so that the last one falls through if possible.
    if (NextBlock && Default != NextBlock && Cases.back().second != NextBlock) {
      // The last case block won't fall through into 'NextBlock' if we emit the
      // branches in this order.  See if rearranging a case value would help.
      for (unsigned i = 0, e = Cases.size()-1; i != e; ++i) {
        if (Cases[i].second == NextBlock) {
          std::swap(Cases[i], Cases.back());
          break;
        }
      }
    }

    // Create a CaseBlock record representing a conditional branch to
    // the Case's target mbb if the value being switched on SV is equal
    // to C.
    MachineBasicBlock *CurBlock = CurMBB;
    for (unsigned i = 0, e = Cases.size(); i != e; ++i) {
      MachineBasicBlock *FallThrough;
      if (i != e-1) {
        FallThrough = new MachineBasicBlock(CurMBB->getBasicBlock());
        CurMF->getBasicBlockList().insert(BBI, FallThrough);
      } else {
        // If the last case doesn't match, go to the default block.
        FallThrough = Default;
      }

      SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, Cases[i].first,
                                     Cases[i].second, FallThrough, CurBlock);

      // If emitting the first comparison, just call visitSwitchCase to emit the
      // code into the current block.  Otherwise, push the CaseBlock onto the
      // vector to be later processed by SDISel, and insert the node's MBB
      // before the next MBB.
      if (CurBlock == CurMBB)
        visitSwitchCase(CB);
      else
        SwitchCases.push_back(CB);

      CurBlock = FallThrough;
    }
    return;
  }

  // If the switch has more than 5 blocks, and is at least 31.25% dense, and the
  // target supports indirect branches, then emit a jump table rather than
  // lowering the switch to a binary tree of conditional branches.
  if ((TLI.isOperationLegal(ISD::BR_JT, MVT::Other) ||
       TLI.isOperationLegal(ISD::BRIND, MVT::Other)) &&
      Cases.size() > 5) {
    uint64_t First = cast<ConstantInt>(Cases.front().first)->getZExtValue();
    uint64_t Last  = cast<ConstantInt>(Cases.back().first)->getZExtValue();
    double Density = (double)Cases.size() / (double)((Last - First) + 1ULL);

    if (Density >= 0.3125) {
      // Create a new basic block to hold the code for loading the address
      // of the jump table, and jumping to it.  Update successor information;
      // we will either branch to the default case for the switch, or the jump
      // table.
      MachineBasicBlock *JumpTableBB = new MachineBasicBlock(LLVMBB);
      CurMF->getBasicBlockList().insert(BBI, JumpTableBB);
      CurMBB->addSuccessor(Default);
      CurMBB->addSuccessor(JumpTableBB);

      // Subtract the lowest switch case value from the value being switched on
      // and conditional branch to default mbb if the result is greater than the
      // difference between smallest and largest cases.
      SDOperand SwitchOp = getValue(SV);
      MVT::ValueType VT = SwitchOp.getValueType();
      SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp,
                                  DAG.getConstant(First, VT));

      // The SDNode we just created, which holds the value being switched on
      // minus the smallest case value, needs to be copied to a virtual
      // register so it can be used as an index into the jump table in a
      // subsequent basic block.  This value may be smaller or larger than the
      // target's pointer type, and therefore require extension or truncation.
      if (VT > TLI.getPointerTy())
        SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB);
      else
        SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB);

      unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
      SDOperand CopyTo = DAG.getCopyToReg(getRoot(), JumpTableReg, SwitchOp);

      // Emit the range check for the jump table, and branch to the default
      // block for the switch statement if the value being switched on exceeds
      // the largest case in the switch.
      SDOperand CMP = DAG.getSetCC(TLI.getSetCCResultTy(), SUB,
                                   DAG.getConstant(Last-First,VT), ISD::SETUGT);
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP,
                              DAG.getBasicBlock(Default)));

      // Build a vector of destination BBs, corresponding to each target
      // of the jump table.  If the value of the jump table slot corresponds to
      // a case statement, push the case's BB onto the vector, otherwise, push
      // the default BB.
      std::vector<MachineBasicBlock*> DestBBs;
      uint64_t TEI = First;
      for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++TEI)
        if (cast<ConstantInt>(ii->first)->getZExtValue() == TEI) {
          DestBBs.push_back(ii->second);
          ++ii;
        } else {
          DestBBs.push_back(Default);
        }

      // Update successor info.  Add one edge to each unique successor.
      // Vector bool would be better, but vector<bool> is really slow.
      std::vector<unsigned char> SuccsHandled;
      SuccsHandled.resize(CurMBB->getParent()->getNumBlockIDs());

      for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
           E = DestBBs.end(); I != E; ++I) {
        if (!SuccsHandled[(*I)->getNumber()]) {
          SuccsHandled[(*I)->getNumber()] = true;
          JumpTableBB->addSuccessor(*I);
        }
      }

      // Create a jump table index for this jump table, or return an existing
      // one.
      unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);

      // Set the jump table information so that we can codegen it as a second
      // MachineBasicBlock
      JT.Reg = JumpTableReg;
      JT.JTI = JTI;
      JT.MBB = JumpTableBB;
      JT.Default = Default;
      return;
    }
  }

  // Push the initial CaseRec onto the worklist
  std::vector<CaseRec> CaseVec;
  CaseVec.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));

  while (!CaseVec.empty()) {
    // Grab a record representing a case range to process off the worklist
    CaseRec CR = CaseVec.back();
    CaseVec.pop_back();

    // Size is the number of Cases represented by this range.  If Size is 1,
    // then we are processing a leaf of the binary search tree.  Otherwise,
    // we need to pick a pivot, and push left and right ranges onto the
    // worklist.
    unsigned Size = CR.Range.second - CR.Range.first;

    if (Size == 1) {
      // Create a CaseBlock record representing a conditional branch to
      // the Case's target mbb if the value being switched on SV is equal
      // to C.  Otherwise, branch to default.
      Constant *C = CR.Range.first->first;
      MachineBasicBlock *Target = CR.Range.first->second;
      SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, C, Target, Default,
                                     CR.CaseBB);

      // If the MBB representing the leaf node is the current MBB, then just
      // call visitSwitchCase to emit the code into the current block.
      // Otherwise, push the CaseBlock onto the vector to be later processed
      // by SDISel, and insert the node's MBB before the next MBB.
      if (CR.CaseBB == CurMBB)
        visitSwitchCase(CB);
      else
        SwitchCases.push_back(CB);
    } else {
      // split case range at pivot
      CaseItr Pivot = CR.Range.first + (Size / 2);
      CaseRange LHSR(CR.Range.first, Pivot);
      CaseRange RHSR(Pivot, CR.Range.second);
      Constant *C = Pivot->first;
      MachineBasicBlock *FalseBB = 0, *TrueBB = 0;

      // We know that we branch to the LHS if the Value being switched on is
      // less than the Pivot value, C.  We use this to optimize our binary
      // tree a bit, by recognizing that if SV is greater than or equal to the
      // LHS's Case Value, and that Case Value is exactly one less than the
      // Pivot's Value, then we can branch directly to the LHS's Target,
      // rather than creating a leaf node for it.
      if ((LHSR.second - LHSR.first) == 1 &&
          LHSR.first->first == CR.GE &&
          cast<ConstantInt>(C)->getZExtValue() ==
          (cast<ConstantInt>(CR.GE)->getZExtValue() + 1ULL)) {
        TrueBB = LHSR.first->second;
      } else {
        TrueBB = new MachineBasicBlock(LLVMBB);
        CurMF->getBasicBlockList().insert(BBI, TrueBB);
        CaseVec.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
      }

      // Similar to the optimization above, if the Value being switched on is
      // known to be less than the Constant CR.LT, and the current Case Value
      // is CR.LT - 1, then we can branch directly to the target block for
      // the current Case Value, rather than emitting a RHS leaf node for it.
      if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
          cast<ConstantInt>(RHSR.first->first)->getZExtValue() ==
          (cast<ConstantInt>(CR.LT)->getZExtValue() - 1ULL)) {
        FalseBB = RHSR.first->second;
      } else {
        FalseBB = new MachineBasicBlock(LLVMBB);
        CurMF->getBasicBlockList().insert(BBI, FalseBB);
        CaseVec.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
      }

      // Create a CaseBlock record representing a conditional branch to
      // the LHS node if the value being switched on SV is less than C.
      // Otherwise, branch to RHS.
      ISD::CondCode CC = ISD::SETLT;
      SelectionDAGISel::CaseBlock CB(CC, SV, C, TrueBB, FalseBB, CR.CaseBB);

      if (CR.CaseBB == CurMBB)
        visitSwitchCase(CB);
      else
        SwitchCases.push_back(CB);
    }
  }
}

void SelectionDAGLowering::visitSub(User &I) {
  // -0.0 - X --> fneg
  if (I.getType()->isFloatingPoint()) {
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
      if (CFP->isExactlyValue(-0.0)) {
        SDOperand Op2 = getValue(I.getOperand(1));
        setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
        return;
      }
    visitFPBinary(I, ISD::FSUB, ISD::VSUB);
  } else
    visitIntBinary(I, ISD::SUB, ISD::VSUB);
}

void
SelectionDAGLowering::visitIntBinary(User &I, unsigned IntOp, unsigned VecOp) {
  const Type *Ty = I.getType();
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
    SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
    SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
    setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
  } else {
    setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
  }
}

void
SelectionDAGLowering::visitFPBinary(User &I, unsigned FPOp, unsigned VecOp) {
  const Type *Ty = I.getType();
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
    SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
    SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
    setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
  } else {
    setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
  }
}

void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);

  setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
}

void SelectionDAGLowering::visitICmp(User &I) {
  ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
  if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
    predicate = IC->getPredicate();
  else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
    predicate = ICmpInst::Predicate(IC->getPredicate());
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode;
  switch (predicate) {
    case ICmpInst::ICMP_EQ  : Opcode = ISD::SETEQ; break;
    case ICmpInst::ICMP_NE  : Opcode = ISD::SETNE; break;
    case ICmpInst::ICMP_UGT : Opcode = ISD::SETUGT; break;
    case ICmpInst::ICMP_UGE : Opcode = ISD::SETUGE; break;
    case ICmpInst::ICMP_ULT : Opcode = ISD::SETULT; break;
    case ICmpInst::ICMP_ULE : Opcode = ISD::SETULE; break;
    case ICmpInst::ICMP_SGT : Opcode = ISD::SETGT; break;
    case ICmpInst::ICMP_SGE : Opcode = ISD::SETGE; break;
    case ICmpInst::ICMP_SLT : Opcode = ISD::SETLT; break;
    case ICmpInst::ICMP_SLE : Opcode = ISD::SETLE; break;
    default:
      assert(!"Invalid ICmp predicate value");
      Opcode = ISD::SETEQ;
      break;
  }
  setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
}

void SelectionDAGLowering::visitFCmp(User &I) {
{ 1460 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE; 1461 if (FCmpInst *FC = dyn_cast<FCmpInst>(&I)) 1462 predicate = FC->getPredicate(); 1463 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I)) 1464 predicate = FCmpInst::Predicate(FC->getPredicate()); 1465 SDOperand Op1 = getValue(I.getOperand(0)); 1466 SDOperand Op2 = getValue(I.getOperand(1)); 1467 ISD::CondCode Condition, FOC, FPC; 1468 switch (predicate) { 1469 case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break; 1470 case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break; 1471 case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break; 1472 case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break; 1473 case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break; 1474 case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break; 1475 case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break; 1476 case FCmpInst::FCMP_ORD: FOC = ISD::SETEQ; FPC = ISD::SETO; break; 1477 case FCmpInst::FCMP_UNO: FOC = ISD::SETNE; FPC = ISD::SETUO; break; 1478 case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break; 1479 case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break; 1480 case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break; 1481 case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break; 1482 case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break; 1483 case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break; 1484 case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break; 1485 default: 1486 assert(!"Invalid FCmp predicate value"); 1487 FOC = FPC = ISD::SETFALSE; 1488 break; 1489 } 1490 if (FiniteOnlyFPMath()) 1491 Condition = FOC; 1492 else 1493 Condition = FPC; 1494 setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Condition)); 1495} 1496 1497void SelectionDAGLowering::visitSelect(User &I) { 1498 SDOperand Cond = getValue(I.getOperand(0)); 1499 SDOperand TrueVal = getValue(I.getOperand(1)); 1500 SDOperand FalseVal = getValue(I.getOperand(2)); 1501 if (!isa<PackedType>(I.getType())) { 1502 setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond, 1503 TrueVal, FalseVal)); 1504 } else { 1505 setValue(&I, DAG.getNode(ISD::VSELECT, MVT::Vector, Cond, TrueVal, FalseVal, 1506 *(TrueVal.Val->op_end()-2), 1507 *(TrueVal.Val->op_end()-1))); 1508 } 1509} 1510 1511 1512void SelectionDAGLowering::visitTrunc(User &I) { 1513 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest). 1514 SDOperand N = getValue(I.getOperand(0)); 1515 MVT::ValueType DestVT = TLI.getValueType(I.getType()); 1516 setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N)); 1517} 1518 1519void SelectionDAGLowering::visitZExt(User &I) { 1520 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest). 1521 // ZExt also can't be a cast to bool for same reason. So, nothing much to do 1522 SDOperand N = getValue(I.getOperand(0)); 1523 MVT::ValueType DestVT = TLI.getValueType(I.getType()); 1524 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N)); 1525} 1526 1527void SelectionDAGLowering::visitSExt(User &I) { 1528 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest). 1529 // SExt also can't be a cast to bool for same reason. 
So, nothing much to do
1530 SDOperand N = getValue(I.getOperand(0));
1531 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1532 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
1533}
1534
1535void SelectionDAGLowering::visitFPTrunc(User &I) {
1536 // FPTrunc is never a no-op cast, no need to check
1537 SDOperand N = getValue(I.getOperand(0));
1538 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1539 setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N));
1540}
1541
1542void SelectionDAGLowering::visitFPExt(User &I){
1543 // FPExt is never a no-op cast, no need to check
1544 SDOperand N = getValue(I.getOperand(0));
1545 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1546 setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
1547}
1548
1549void SelectionDAGLowering::visitFPToUI(User &I) {
1550 // FPToUI is never a no-op cast, no need to check
1551 SDOperand N = getValue(I.getOperand(0));
1552 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1553 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
1554}
1555
1556void SelectionDAGLowering::visitFPToSI(User &I) {
1557 // FPToSI is never a no-op cast, no need to check
1558 SDOperand N = getValue(I.getOperand(0));
1559 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1560 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
1561}
1562
1563void SelectionDAGLowering::visitUIToFP(User &I) {
1564 // UIToFP is never a no-op cast, no need to check
1565 SDOperand N = getValue(I.getOperand(0));
1566 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1567 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
1568}
1569
1570void SelectionDAGLowering::visitSIToFP(User &I){
1571 // SIToFP is never a no-op cast, no need to check
1572 SDOperand N = getValue(I.getOperand(0));
1573 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1574 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
1575}
1576
1577void SelectionDAGLowering::visitPtrToInt(User &I) {
1578 // What to do depends on the size of the integer and the size of the pointer.
1579 // We can either truncate, zero extend, or no-op, accordingly.
1580 SDOperand N = getValue(I.getOperand(0));
1581 MVT::ValueType SrcVT = N.getValueType();
1582 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1583 SDOperand Result;
1584 if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(SrcVT))
1585 Result = DAG.getNode(ISD::TRUNCATE, DestVT, N);
1586 else
1587 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
1588 Result = DAG.getNode(ISD::ZERO_EXTEND, DestVT, N);
1589 setValue(&I, Result);
1590}
1591
1592void SelectionDAGLowering::visitIntToPtr(User &I) {
1593 // What to do depends on the size of the integer and the size of the pointer.
1594 // We can either truncate, zero extend, or no-op, accordingly.
1595 SDOperand N = getValue(I.getOperand(0));
1596 MVT::ValueType SrcVT = N.getValueType();
1597 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1598 if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(SrcVT))
1599 setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
1600 else
1601 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
1602 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
1603}
1604
1605void SelectionDAGLowering::visitBitCast(User &I) {
1606 SDOperand N = getValue(I.getOperand(0));
1607 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1608 if (DestVT == MVT::Vector) {
1609 // This is a cast to a vector from something else.
1610 // Get information about the output vector.
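    // The element count and element value type are passed along as extra
    // operands on the VBIT_CONVERT node built below.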
1611 const PackedType *DestTy = cast<PackedType>(I.getType());
1612 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
1613 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N,
1614 DAG.getConstant(DestTy->getNumElements(),MVT::i32),
1615 DAG.getValueType(EltVT)));
1616 return;
1617 }
1618 MVT::ValueType SrcVT = N.getValueType();
1619 if (SrcVT == MVT::Vector) {
1620 // This is a cast from a vector to something else.
1621 // Get information about the input vector.
1622 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N));
1623 return;
1624 }
1625
1626 // BitCast assures us that source and destination are the same size so this
1627 // is either a BIT_CONVERT or a no-op.
1628 if (DestVT != N.getValueType())
1629 setValue(&I, DAG.getNode(ISD::BIT_CONVERT, DestVT, N)); // convert types
1630 else
1631 setValue(&I, N); // noop cast.
1632}
1633
1634void SelectionDAGLowering::visitInsertElement(User &I) {
1635 SDOperand InVec = getValue(I.getOperand(0));
1636 SDOperand InVal = getValue(I.getOperand(1));
1637 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
1638 getValue(I.getOperand(2)));
1639
1640 SDOperand Num = *(InVec.Val->op_end()-2);
1641 SDOperand Typ = *(InVec.Val->op_end()-1);
1642 setValue(&I, DAG.getNode(ISD::VINSERT_VECTOR_ELT, MVT::Vector,
1643 InVec, InVal, InIdx, Num, Typ));
1644}
1645
1646void SelectionDAGLowering::visitExtractElement(User &I) {
1647 SDOperand InVec = getValue(I.getOperand(0));
1648 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
1649 getValue(I.getOperand(1)));
1650 SDOperand Typ = *(InVec.Val->op_end()-1);
1651 setValue(&I, DAG.getNode(ISD::VEXTRACT_VECTOR_ELT,
1652 TLI.getValueType(I.getType()), InVec, InIdx));
1653}
1654
1655void SelectionDAGLowering::visitShuffleVector(User &I) {
1656 SDOperand V1 = getValue(I.getOperand(0));
1657 SDOperand V2 = getValue(I.getOperand(1));
1658 SDOperand Mask = getValue(I.getOperand(2));
1659
1660 SDOperand Num = *(V1.Val->op_end()-2);
1661 SDOperand Typ = *(V2.Val->op_end()-1);
1662 setValue(&I, DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector,
1663 V1, V2, Mask, Num, Typ));
1664}
1665
1666
1667void SelectionDAGLowering::visitGetElementPtr(User &I) {
1668 SDOperand N = getValue(I.getOperand(0));
1669 const Type *Ty = I.getOperand(0)->getType();
1670
1671 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
1672 OI != E; ++OI) {
1673 Value *Idx = *OI;
1674 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
1675 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
1676 if (Field) {
1677 // N = N + Offset
1678 uint64_t Offset = TD->getStructLayout(StTy)->MemberOffsets[Field];
1679 N = DAG.getNode(ISD::ADD, N.getValueType(), N,
1680 getIntPtrConstant(Offset));
1681 }
1682 Ty = StTy->getElementType(Field);
1683 } else {
1684 Ty = cast<SequentialType>(Ty)->getElementType();
1685
1686 // If this is a constant subscript, handle it quickly.
1687 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
1688 if (CI->getZExtValue() == 0) continue;
1689 uint64_t Offs =
1690 TD->getTypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
1691 N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
1692 continue;
1693 }
1694
1695 // N = N + Idx * ElementSize;
1696 uint64_t ElementSize = TD->getTypeSize(Ty);
1697 SDOperand IdxN = getValue(Idx);
1698
1699 // If the index is smaller or larger than intptr_t, truncate or extend
1700 // it.
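      // GEP indices are signed values, so the widening below uses a sign
      // extension.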
1701 if (IdxN.getValueType() < N.getValueType()) { 1702 IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN); 1703 } else if (IdxN.getValueType() > N.getValueType()) 1704 IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN); 1705 1706 // If this is a multiply by a power of two, turn it into a shl 1707 // immediately. This is a very common case. 1708 if (isPowerOf2_64(ElementSize)) { 1709 unsigned Amt = Log2_64(ElementSize); 1710 IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN, 1711 DAG.getConstant(Amt, TLI.getShiftAmountTy())); 1712 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN); 1713 continue; 1714 } 1715 1716 SDOperand Scale = getIntPtrConstant(ElementSize); 1717 IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale); 1718 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN); 1719 } 1720 } 1721 setValue(&I, N); 1722} 1723 1724void SelectionDAGLowering::visitAlloca(AllocaInst &I) { 1725 // If this is a fixed sized alloca in the entry block of the function, 1726 // allocate it statically on the stack. 1727 if (FuncInfo.StaticAllocaMap.count(&I)) 1728 return; // getValue will auto-populate this. 1729 1730 const Type *Ty = I.getAllocatedType(); 1731 uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty); 1732 unsigned Align = std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty), 1733 I.getAlignment()); 1734 1735 SDOperand AllocSize = getValue(I.getArraySize()); 1736 MVT::ValueType IntPtr = TLI.getPointerTy(); 1737 if (IntPtr < AllocSize.getValueType()) 1738 AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize); 1739 else if (IntPtr > AllocSize.getValueType()) 1740 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize); 1741 1742 AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize, 1743 getIntPtrConstant(TySize)); 1744 1745 // Handle alignment. If the requested alignment is less than or equal to the 1746 // stack alignment, ignore it and round the size of the allocation up to the 1747 // stack alignment size. If the size is greater than the stack alignment, we 1748 // note this in the DYNAMIC_STACKALLOC node. 1749 unsigned StackAlign = 1750 TLI.getTargetMachine().getFrameInfo()->getStackAlignment(); 1751 if (Align <= StackAlign) { 1752 Align = 0; 1753 // Add SA-1 to the size. 1754 AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize, 1755 getIntPtrConstant(StackAlign-1)); 1756 // Mask out the low bits for alignment purposes. 1757 AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize, 1758 getIntPtrConstant(~(uint64_t)(StackAlign-1))); 1759 } 1760 1761 SDOperand Ops[] = { getRoot(), AllocSize, getIntPtrConstant(Align) }; 1762 const MVT::ValueType *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(), 1763 MVT::Other); 1764 SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, 2, Ops, 3); 1765 DAG.setRoot(setValue(&I, DSA).getValue(1)); 1766 1767 // Inform the Frame Information that we have just allocated a variable-sized 1768 // object. 1769 CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject(); 1770} 1771 1772void SelectionDAGLowering::visitLoad(LoadInst &I) { 1773 SDOperand Ptr = getValue(I.getOperand(0)); 1774 1775 SDOperand Root; 1776 if (I.isVolatile()) 1777 Root = getRoot(); 1778 else { 1779 // Do not serialize non-volatile loads against each other. 
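    // Their output chains are instead queued on PendingLoads by getLoadFrom
    // below, rather than being attached to the DAG root here.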
1780 Root = DAG.getRoot(); 1781 } 1782 1783 setValue(&I, getLoadFrom(I.getType(), Ptr, I.getOperand(0), 1784 Root, I.isVolatile())); 1785} 1786 1787SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr, 1788 const Value *SV, SDOperand Root, 1789 bool isVolatile) { 1790 SDOperand L; 1791 if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) { 1792 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType()); 1793 L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr, 1794 DAG.getSrcValue(SV)); 1795 } else { 1796 L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SV, 0, isVolatile); 1797 } 1798 1799 if (isVolatile) 1800 DAG.setRoot(L.getValue(1)); 1801 else 1802 PendingLoads.push_back(L.getValue(1)); 1803 1804 return L; 1805} 1806 1807 1808void SelectionDAGLowering::visitStore(StoreInst &I) { 1809 Value *SrcV = I.getOperand(0); 1810 SDOperand Src = getValue(SrcV); 1811 SDOperand Ptr = getValue(I.getOperand(1)); 1812 DAG.setRoot(DAG.getStore(getRoot(), Src, Ptr, I.getOperand(1), 0, 1813 I.isVolatile())); 1814} 1815 1816/// IntrinsicCannotAccessMemory - Return true if the specified intrinsic cannot 1817/// access memory and has no other side effects at all. 1818static bool IntrinsicCannotAccessMemory(unsigned IntrinsicID) { 1819#define GET_NO_MEMORY_INTRINSICS 1820#include "llvm/Intrinsics.gen" 1821#undef GET_NO_MEMORY_INTRINSICS 1822 return false; 1823} 1824 1825// IntrinsicOnlyReadsMemory - Return true if the specified intrinsic doesn't 1826// have any side-effects or if it only reads memory. 1827static bool IntrinsicOnlyReadsMemory(unsigned IntrinsicID) { 1828#define GET_SIDE_EFFECT_INFO 1829#include "llvm/Intrinsics.gen" 1830#undef GET_SIDE_EFFECT_INFO 1831 return false; 1832} 1833 1834/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC 1835/// node. 1836void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I, 1837 unsigned Intrinsic) { 1838 bool HasChain = !IntrinsicCannotAccessMemory(Intrinsic); 1839 bool OnlyLoad = HasChain && IntrinsicOnlyReadsMemory(Intrinsic); 1840 1841 // Build the operand list. 1842 SmallVector<SDOperand, 8> Ops; 1843 if (HasChain) { // If this intrinsic has side-effects, chainify it. 1844 if (OnlyLoad) { 1845 // We don't need to serialize loads against other loads. 1846 Ops.push_back(DAG.getRoot()); 1847 } else { 1848 Ops.push_back(getRoot()); 1849 } 1850 } 1851 1852 // Add the intrinsic ID as an integer operand. 1853 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy())); 1854 1855 // Add all operands of the call to the operand list. 1856 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) { 1857 SDOperand Op = getValue(I.getOperand(i)); 1858 1859 // If this is a vector type, force it to the right packed type. 
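    // That is, convert the generic MVT::Vector operand into the legal packed
    // value type the target expects.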
1860 if (Op.getValueType() == MVT::Vector) { 1861 const PackedType *OpTy = cast<PackedType>(I.getOperand(i)->getType()); 1862 MVT::ValueType EltVT = TLI.getValueType(OpTy->getElementType()); 1863 1864 MVT::ValueType VVT = MVT::getVectorType(EltVT, OpTy->getNumElements()); 1865 assert(VVT != MVT::Other && "Intrinsic uses a non-legal type?"); 1866 Op = DAG.getNode(ISD::VBIT_CONVERT, VVT, Op); 1867 } 1868 1869 assert(TLI.isTypeLegal(Op.getValueType()) && 1870 "Intrinsic uses a non-legal type?"); 1871 Ops.push_back(Op); 1872 } 1873 1874 std::vector<MVT::ValueType> VTs; 1875 if (I.getType() != Type::VoidTy) { 1876 MVT::ValueType VT = TLI.getValueType(I.getType()); 1877 if (VT == MVT::Vector) { 1878 const PackedType *DestTy = cast<PackedType>(I.getType()); 1879 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType()); 1880 1881 VT = MVT::getVectorType(EltVT, DestTy->getNumElements()); 1882 assert(VT != MVT::Other && "Intrinsic uses a non-legal type?"); 1883 } 1884 1885 assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?"); 1886 VTs.push_back(VT); 1887 } 1888 if (HasChain) 1889 VTs.push_back(MVT::Other); 1890 1891 const MVT::ValueType *VTList = DAG.getNodeValueTypes(VTs); 1892 1893 // Create the node. 1894 SDOperand Result; 1895 if (!HasChain) 1896 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTList, VTs.size(), 1897 &Ops[0], Ops.size()); 1898 else if (I.getType() != Type::VoidTy) 1899 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTList, VTs.size(), 1900 &Ops[0], Ops.size()); 1901 else 1902 Result = DAG.getNode(ISD::INTRINSIC_VOID, VTList, VTs.size(), 1903 &Ops[0], Ops.size()); 1904 1905 if (HasChain) { 1906 SDOperand Chain = Result.getValue(Result.Val->getNumValues()-1); 1907 if (OnlyLoad) 1908 PendingLoads.push_back(Chain); 1909 else 1910 DAG.setRoot(Chain); 1911 } 1912 if (I.getType() != Type::VoidTy) { 1913 if (const PackedType *PTy = dyn_cast<PackedType>(I.getType())) { 1914 MVT::ValueType EVT = TLI.getValueType(PTy->getElementType()); 1915 Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result, 1916 DAG.getConstant(PTy->getNumElements(), MVT::i32), 1917 DAG.getValueType(EVT)); 1918 } 1919 setValue(&I, Result); 1920 } 1921} 1922 1923/// visitIntrinsicCall - Lower the call to the specified intrinsic function. If 1924/// we want to emit this as a call to a named external function, return the name 1925/// otherwise lower it and return null. 1926const char * 1927SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { 1928 switch (Intrinsic) { 1929 default: 1930 // By default, turn this into a target intrinsic node. 
1931 visitTargetIntrinsic(I, Intrinsic); 1932 return 0; 1933 case Intrinsic::vastart: visitVAStart(I); return 0; 1934 case Intrinsic::vaend: visitVAEnd(I); return 0; 1935 case Intrinsic::vacopy: visitVACopy(I); return 0; 1936 case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0; 1937 case Intrinsic::frameaddress: visitFrameReturnAddress(I, true); return 0; 1938 case Intrinsic::setjmp: 1939 return "_setjmp"+!TLI.usesUnderscoreSetJmp(); 1940 break; 1941 case Intrinsic::longjmp: 1942 return "_longjmp"+!TLI.usesUnderscoreLongJmp(); 1943 break; 1944 case Intrinsic::memcpy_i32: 1945 case Intrinsic::memcpy_i64: 1946 visitMemIntrinsic(I, ISD::MEMCPY); 1947 return 0; 1948 case Intrinsic::memset_i32: 1949 case Intrinsic::memset_i64: 1950 visitMemIntrinsic(I, ISD::MEMSET); 1951 return 0; 1952 case Intrinsic::memmove_i32: 1953 case Intrinsic::memmove_i64: 1954 visitMemIntrinsic(I, ISD::MEMMOVE); 1955 return 0; 1956 1957 case Intrinsic::dbg_stoppoint: { 1958 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1959 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I); 1960 if (DebugInfo && SPI.getContext() && DebugInfo->Verify(SPI.getContext())) { 1961 SDOperand Ops[5]; 1962 1963 Ops[0] = getRoot(); 1964 Ops[1] = getValue(SPI.getLineValue()); 1965 Ops[2] = getValue(SPI.getColumnValue()); 1966 1967 DebugInfoDesc *DD = DebugInfo->getDescFor(SPI.getContext()); 1968 assert(DD && "Not a debug information descriptor"); 1969 CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD); 1970 1971 Ops[3] = DAG.getString(CompileUnit->getFileName()); 1972 Ops[4] = DAG.getString(CompileUnit->getDirectory()); 1973 1974 DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops, 5)); 1975 } 1976 1977 return 0; 1978 } 1979 case Intrinsic::dbg_region_start: { 1980 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1981 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I); 1982 if (DebugInfo && RSI.getContext() && DebugInfo->Verify(RSI.getContext())) { 1983 unsigned LabelID = DebugInfo->RecordRegionStart(RSI.getContext()); 1984 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, getRoot(), 1985 DAG.getConstant(LabelID, MVT::i32))); 1986 } 1987 1988 return 0; 1989 } 1990 case Intrinsic::dbg_region_end: { 1991 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1992 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I); 1993 if (DebugInfo && REI.getContext() && DebugInfo->Verify(REI.getContext())) { 1994 unsigned LabelID = DebugInfo->RecordRegionEnd(REI.getContext()); 1995 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, 1996 getRoot(), DAG.getConstant(LabelID, MVT::i32))); 1997 } 1998 1999 return 0; 2000 } 2001 case Intrinsic::dbg_func_start: { 2002 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 2003 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I); 2004 if (DebugInfo && FSI.getSubprogram() && 2005 DebugInfo->Verify(FSI.getSubprogram())) { 2006 unsigned LabelID = DebugInfo->RecordRegionStart(FSI.getSubprogram()); 2007 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, 2008 getRoot(), DAG.getConstant(LabelID, MVT::i32))); 2009 } 2010 2011 return 0; 2012 } 2013 case Intrinsic::dbg_declare: { 2014 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 2015 DbgDeclareInst &DI = cast<DbgDeclareInst>(I); 2016 if (DebugInfo && DI.getVariable() && DebugInfo->Verify(DI.getVariable())) { 2017 SDOperand AddressOp = getValue(DI.getAddress()); 2018 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp)) 2019 DebugInfo->RecordVariable(DI.getVariable(), FI->getIndex()); 2020 } 
2021 2022 return 0; 2023 } 2024 2025 case Intrinsic::sqrt_f32: 2026 case Intrinsic::sqrt_f64: 2027 setValue(&I, DAG.getNode(ISD::FSQRT, 2028 getValue(I.getOperand(1)).getValueType(), 2029 getValue(I.getOperand(1)))); 2030 return 0; 2031 case Intrinsic::powi_f32: 2032 case Intrinsic::powi_f64: 2033 setValue(&I, DAG.getNode(ISD::FPOWI, 2034 getValue(I.getOperand(1)).getValueType(), 2035 getValue(I.getOperand(1)), 2036 getValue(I.getOperand(2)))); 2037 return 0; 2038 case Intrinsic::pcmarker: { 2039 SDOperand Tmp = getValue(I.getOperand(1)); 2040 DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp)); 2041 return 0; 2042 } 2043 case Intrinsic::readcyclecounter: { 2044 SDOperand Op = getRoot(); 2045 SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER, 2046 DAG.getNodeValueTypes(MVT::i64, MVT::Other), 2, 2047 &Op, 1); 2048 setValue(&I, Tmp); 2049 DAG.setRoot(Tmp.getValue(1)); 2050 return 0; 2051 } 2052 case Intrinsic::bswap_i16: 2053 case Intrinsic::bswap_i32: 2054 case Intrinsic::bswap_i64: 2055 setValue(&I, DAG.getNode(ISD::BSWAP, 2056 getValue(I.getOperand(1)).getValueType(), 2057 getValue(I.getOperand(1)))); 2058 return 0; 2059 case Intrinsic::cttz_i8: 2060 case Intrinsic::cttz_i16: 2061 case Intrinsic::cttz_i32: 2062 case Intrinsic::cttz_i64: 2063 setValue(&I, DAG.getNode(ISD::CTTZ, 2064 getValue(I.getOperand(1)).getValueType(), 2065 getValue(I.getOperand(1)))); 2066 return 0; 2067 case Intrinsic::ctlz_i8: 2068 case Intrinsic::ctlz_i16: 2069 case Intrinsic::ctlz_i32: 2070 case Intrinsic::ctlz_i64: 2071 setValue(&I, DAG.getNode(ISD::CTLZ, 2072 getValue(I.getOperand(1)).getValueType(), 2073 getValue(I.getOperand(1)))); 2074 return 0; 2075 case Intrinsic::ctpop_i8: 2076 case Intrinsic::ctpop_i16: 2077 case Intrinsic::ctpop_i32: 2078 case Intrinsic::ctpop_i64: 2079 setValue(&I, DAG.getNode(ISD::CTPOP, 2080 getValue(I.getOperand(1)).getValueType(), 2081 getValue(I.getOperand(1)))); 2082 return 0; 2083 case Intrinsic::stacksave: { 2084 SDOperand Op = getRoot(); 2085 SDOperand Tmp = DAG.getNode(ISD::STACKSAVE, 2086 DAG.getNodeValueTypes(TLI.getPointerTy(), MVT::Other), 2, &Op, 1); 2087 setValue(&I, Tmp); 2088 DAG.setRoot(Tmp.getValue(1)); 2089 return 0; 2090 } 2091 case Intrinsic::stackrestore: { 2092 SDOperand Tmp = getValue(I.getOperand(1)); 2093 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp)); 2094 return 0; 2095 } 2096 case Intrinsic::prefetch: 2097 // FIXME: Currently discarding prefetches. 2098 return 0; 2099 } 2100} 2101 2102 2103void SelectionDAGLowering::visitCall(CallInst &I) { 2104 const char *RenameFn = 0; 2105 if (Function *F = I.getCalledFunction()) { 2106 if (F->isExternal()) 2107 if (unsigned IID = F->getIntrinsicID()) { 2108 RenameFn = visitIntrinsicCall(I, IID); 2109 if (!RenameFn) 2110 return; 2111 } else { // Not an LLVM intrinsic. 2112 const std::string &Name = F->getName(); 2113 if (Name[0] == 'c' && (Name == "copysign" || Name == "copysignf")) { 2114 if (I.getNumOperands() == 3 && // Basic sanity checks. 2115 I.getOperand(1)->getType()->isFloatingPoint() && 2116 I.getType() == I.getOperand(1)->getType() && 2117 I.getType() == I.getOperand(2)->getType()) { 2118 SDOperand LHS = getValue(I.getOperand(1)); 2119 SDOperand RHS = getValue(I.getOperand(2)); 2120 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(), 2121 LHS, RHS)); 2122 return; 2123 } 2124 } else if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) { 2125 if (I.getNumOperands() == 2 && // Basic sanity checks. 
2126 I.getOperand(1)->getType()->isFloatingPoint() && 2127 I.getType() == I.getOperand(1)->getType()) { 2128 SDOperand Tmp = getValue(I.getOperand(1)); 2129 setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp)); 2130 return; 2131 } 2132 } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) { 2133 if (I.getNumOperands() == 2 && // Basic sanity checks. 2134 I.getOperand(1)->getType()->isFloatingPoint() && 2135 I.getType() == I.getOperand(1)->getType()) { 2136 SDOperand Tmp = getValue(I.getOperand(1)); 2137 setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp)); 2138 return; 2139 } 2140 } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) { 2141 if (I.getNumOperands() == 2 && // Basic sanity checks. 2142 I.getOperand(1)->getType()->isFloatingPoint() && 2143 I.getType() == I.getOperand(1)->getType()) { 2144 SDOperand Tmp = getValue(I.getOperand(1)); 2145 setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp)); 2146 return; 2147 } 2148 } 2149 } 2150 } else if (isa<InlineAsm>(I.getOperand(0))) { 2151 visitInlineAsm(I); 2152 return; 2153 } 2154 2155 const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType()); 2156 const FunctionType *FTy = cast<FunctionType>(PT->getElementType()); 2157 2158 SDOperand Callee; 2159 if (!RenameFn) 2160 Callee = getValue(I.getOperand(0)); 2161 else 2162 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy()); 2163 TargetLowering::ArgListTy Args; 2164 TargetLowering::ArgListEntry Entry; 2165 Args.reserve(I.getNumOperands()); 2166 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) { 2167 Value *Arg = I.getOperand(i); 2168 SDOperand ArgNode = getValue(Arg); 2169 Entry.Node = ArgNode; Entry.Ty = Arg->getType(); 2170 Entry.isSigned = FTy->paramHasAttr(i, FunctionType::SExtAttribute); 2171 Args.push_back(Entry); 2172 } 2173 2174 std::pair<SDOperand,SDOperand> Result = 2175 TLI.LowerCallTo(getRoot(), I.getType(), 2176 FTy->paramHasAttr(0,FunctionType::SExtAttribute), 2177 FTy->isVarArg(), I.getCallingConv(), I.isTailCall(), 2178 Callee, Args, DAG); 2179 if (I.getType() != Type::VoidTy) 2180 setValue(&I, Result.first); 2181 DAG.setRoot(Result.second); 2182} 2183 2184SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG, 2185 SDOperand &Chain, SDOperand &Flag)const{ 2186 SDOperand Val = DAG.getCopyFromReg(Chain, Regs[0], RegVT, Flag); 2187 Chain = Val.getValue(1); 2188 Flag = Val.getValue(2); 2189 2190 // If the result was expanded, copy from the top part. 2191 if (Regs.size() > 1) { 2192 assert(Regs.size() == 2 && 2193 "Cannot expand to more than 2 elts yet!"); 2194 SDOperand Hi = DAG.getCopyFromReg(Chain, Regs[1], RegVT, Flag); 2195 Chain = Hi.getValue(1); 2196 Flag = Hi.getValue(2); 2197 if (DAG.getTargetLoweringInfo().isLittleEndian()) 2198 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Val, Hi); 2199 else 2200 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Hi, Val); 2201 } 2202 2203 // Otherwise, if the return value was promoted or extended, truncate it to the 2204 // appropriate type. 2205 if (RegVT == ValueVT) 2206 return Val; 2207 2208 if (MVT::isInteger(RegVT)) { 2209 if (ValueVT < RegVT) 2210 return DAG.getNode(ISD::TRUNCATE, ValueVT, Val); 2211 else 2212 return DAG.getNode(ISD::ANY_EXTEND, ValueVT, Val); 2213 } else { 2214 return DAG.getNode(ISD::FP_ROUND, ValueVT, Val); 2215 } 2216} 2217 2218/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the 2219/// specified value into the registers specified by this object. 
This uses 2220/// Chain/Flag as the input and updates them for the output Chain/Flag. 2221void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG, 2222 SDOperand &Chain, SDOperand &Flag, 2223 MVT::ValueType PtrVT) const { 2224 if (Regs.size() == 1) { 2225 // If there is a single register and the types differ, this must be 2226 // a promotion. 2227 if (RegVT != ValueVT) { 2228 if (MVT::isInteger(RegVT)) { 2229 if (RegVT < ValueVT) 2230 Val = DAG.getNode(ISD::TRUNCATE, RegVT, Val); 2231 else 2232 Val = DAG.getNode(ISD::ANY_EXTEND, RegVT, Val); 2233 } else 2234 Val = DAG.getNode(ISD::FP_EXTEND, RegVT, Val); 2235 } 2236 Chain = DAG.getCopyToReg(Chain, Regs[0], Val, Flag); 2237 Flag = Chain.getValue(1); 2238 } else { 2239 std::vector<unsigned> R(Regs); 2240 if (!DAG.getTargetLoweringInfo().isLittleEndian()) 2241 std::reverse(R.begin(), R.end()); 2242 2243 for (unsigned i = 0, e = R.size(); i != e; ++i) { 2244 SDOperand Part = DAG.getNode(ISD::EXTRACT_ELEMENT, RegVT, Val, 2245 DAG.getConstant(i, PtrVT)); 2246 Chain = DAG.getCopyToReg(Chain, R[i], Part, Flag); 2247 Flag = Chain.getValue(1); 2248 } 2249 } 2250} 2251 2252/// AddInlineAsmOperands - Add this value to the specified inlineasm node 2253/// operand list. This adds the code marker and includes the number of 2254/// values added into it. 2255void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG, 2256 std::vector<SDOperand> &Ops) const { 2257 Ops.push_back(DAG.getConstant(Code | (Regs.size() << 3), MVT::i32)); 2258 for (unsigned i = 0, e = Regs.size(); i != e; ++i) 2259 Ops.push_back(DAG.getRegister(Regs[i], RegVT)); 2260} 2261 2262/// isAllocatableRegister - If the specified register is safe to allocate, 2263/// i.e. it isn't a stack pointer or some other special register, return the 2264/// register class for the register. Otherwise, return null. 2265static const TargetRegisterClass * 2266isAllocatableRegister(unsigned Reg, MachineFunction &MF, 2267 const TargetLowering &TLI, const MRegisterInfo *MRI) { 2268 MVT::ValueType FoundVT = MVT::Other; 2269 const TargetRegisterClass *FoundRC = 0; 2270 for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(), 2271 E = MRI->regclass_end(); RCI != E; ++RCI) { 2272 MVT::ValueType ThisVT = MVT::Other; 2273 2274 const TargetRegisterClass *RC = *RCI; 2275 // If none of the the value types for this register class are valid, we 2276 // can't use it. For example, 64-bit reg classes on 32-bit targets. 2277 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end(); 2278 I != E; ++I) { 2279 if (TLI.isTypeLegal(*I)) { 2280 // If we have already found this register in a different register class, 2281 // choose the one with the largest VT specified. For example, on 2282 // PowerPC, we favor f64 register classes over f32. 2283 if (FoundVT == MVT::Other || 2284 MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) { 2285 ThisVT = *I; 2286 break; 2287 } 2288 } 2289 } 2290 2291 if (ThisVT == MVT::Other) continue; 2292 2293 // NOTE: This isn't ideal. In particular, this might allocate the 2294 // frame pointer in functions that need it (due to them not being taken 2295 // out of allocation, because a variable sized allocation hasn't been seen 2296 // yet). This is a slight code pessimization, but should still work. 2297 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF), 2298 E = RC->allocation_order_end(MF); I != E; ++I) 2299 if (*I == Reg) { 2300 // We found a matching register class. 
Keep looking at others in case 2301 // we find one with larger registers that this physreg is also in. 2302 FoundRC = RC; 2303 FoundVT = ThisVT; 2304 break; 2305 } 2306 } 2307 return FoundRC; 2308} 2309 2310RegsForValue SelectionDAGLowering:: 2311GetRegistersForValue(const std::string &ConstrCode, 2312 MVT::ValueType VT, bool isOutReg, bool isInReg, 2313 std::set<unsigned> &OutputRegs, 2314 std::set<unsigned> &InputRegs) { 2315 std::pair<unsigned, const TargetRegisterClass*> PhysReg = 2316 TLI.getRegForInlineAsmConstraint(ConstrCode, VT); 2317 std::vector<unsigned> Regs; 2318 2319 unsigned NumRegs = VT != MVT::Other ? TLI.getNumElements(VT) : 1; 2320 MVT::ValueType RegVT; 2321 MVT::ValueType ValueVT = VT; 2322 2323 // If this is a constraint for a specific physical register, like {r17}, 2324 // assign it now. 2325 if (PhysReg.first) { 2326 if (VT == MVT::Other) 2327 ValueVT = *PhysReg.second->vt_begin(); 2328 2329 // Get the actual register value type. This is important, because the user 2330 // may have asked for (e.g.) the AX register in i32 type. We need to 2331 // remember that AX is actually i16 to get the right extension. 2332 RegVT = *PhysReg.second->vt_begin(); 2333 2334 // This is a explicit reference to a physical register. 2335 Regs.push_back(PhysReg.first); 2336 2337 // If this is an expanded reference, add the rest of the regs to Regs. 2338 if (NumRegs != 1) { 2339 TargetRegisterClass::iterator I = PhysReg.second->begin(); 2340 TargetRegisterClass::iterator E = PhysReg.second->end(); 2341 for (; *I != PhysReg.first; ++I) 2342 assert(I != E && "Didn't find reg!"); 2343 2344 // Already added the first reg. 2345 --NumRegs; ++I; 2346 for (; NumRegs; --NumRegs, ++I) { 2347 assert(I != E && "Ran out of registers to allocate!"); 2348 Regs.push_back(*I); 2349 } 2350 } 2351 return RegsForValue(Regs, RegVT, ValueVT); 2352 } 2353 2354 // Otherwise, if this was a reference to an LLVM register class, create vregs 2355 // for this reference. 2356 std::vector<unsigned> RegClassRegs; 2357 if (PhysReg.second) { 2358 // If this is an early clobber or tied register, our regalloc doesn't know 2359 // how to maintain the constraint. If it isn't, go ahead and create vreg 2360 // and let the regalloc do the right thing. 2361 if (!isOutReg || !isInReg) { 2362 if (VT == MVT::Other) 2363 ValueVT = *PhysReg.second->vt_begin(); 2364 RegVT = *PhysReg.second->vt_begin(); 2365 2366 // Create the appropriate number of virtual registers. 2367 SSARegMap *RegMap = DAG.getMachineFunction().getSSARegMap(); 2368 for (; NumRegs; --NumRegs) 2369 Regs.push_back(RegMap->createVirtualRegister(PhysReg.second)); 2370 2371 return RegsForValue(Regs, RegVT, ValueVT); 2372 } 2373 2374 // Otherwise, we can't allocate it. Let the code below figure out how to 2375 // maintain these constraints. 2376 RegClassRegs.assign(PhysReg.second->begin(), PhysReg.second->end()); 2377 2378 } else { 2379 // This is a reference to a register class that doesn't directly correspond 2380 // to an LLVM register class. Allocate NumRegs consecutive, available, 2381 // registers from the class. 2382 RegClassRegs = TLI.getRegClassForInlineAsmConstraint(ConstrCode, VT); 2383 } 2384 2385 const MRegisterInfo *MRI = DAG.getTarget().getRegisterInfo(); 2386 MachineFunction &MF = *CurMBB->getParent(); 2387 unsigned NumAllocated = 0; 2388 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) { 2389 unsigned Reg = RegClassRegs[i]; 2390 // See if this register is available. 2391 if ((isOutReg && OutputRegs.count(Reg)) || // Already used. 
2392 (isInReg && InputRegs.count(Reg))) { // Already used.
2393 // Make sure we find consecutive registers.
2394 NumAllocated = 0;
2395 continue;
2396 }
2397
2398 // Check to see if this register is allocatable (i.e. don't give out the
2399 // stack pointer).
2400 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, MRI);
2401 if (!RC) {
2402 // Make sure we find consecutive registers.
2403 NumAllocated = 0;
2404 continue;
2405 }
2406
2407 // Okay, this register is good, we can use it.
2408 ++NumAllocated;
2409
2410 // If we allocated enough consecutive registers, we are done.
2411 if (NumAllocated == NumRegs) {
2412 unsigned RegStart = (i-NumAllocated)+1;
2413 unsigned RegEnd = i+1;
2414 // Mark all of the allocated registers used.
2415 for (unsigned i = RegStart; i != RegEnd; ++i) {
2416 unsigned Reg = RegClassRegs[i];
2417 Regs.push_back(Reg);
2418 if (isOutReg) OutputRegs.insert(Reg); // Mark reg used.
2419 if (isInReg) InputRegs.insert(Reg); // Mark reg used.
2420 }
2421
2422 return RegsForValue(Regs, *RC->vt_begin(), VT);
2423 }
2424 }
2425
2426 // Otherwise, we couldn't allocate enough registers for this.
2427 return RegsForValue();
2428}
2429
2430
2431/// visitInlineAsm - Handle a call to an InlineAsm object.
2432///
2433void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
2434 InlineAsm *IA = cast<InlineAsm>(I.getOperand(0));
2435
2436 SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
2437 MVT::Other);
2438
2439 std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
2440 std::vector<MVT::ValueType> ConstraintVTs;
2441
2442 /// AsmNodeOperands - A list of pairs. The first element is a register, the
2443 /// second is a bitfield where bit #0 is set if it is a use and bit #1 is set
2444 /// if it is a def of that register.
2445 std::vector<SDOperand> AsmNodeOperands;
2446 AsmNodeOperands.push_back(SDOperand()); // reserve space for input chain
2447 AsmNodeOperands.push_back(AsmStr);
2448
2449 SDOperand Chain = getRoot();
2450 SDOperand Flag;
2451
2452 // We fully assign registers here at isel time. This is not optimal, but
2453 // should work. For register classes that correspond to LLVM classes, we
2454 // could let the LLVM RA do its thing, but we currently don't. Do a prepass
2455 // over the constraints, collecting fixed registers that we know we can't use.
2456 std::set<unsigned> OutputRegs, InputRegs;
2457 unsigned OpNum = 1;
2458 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
2459 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
2460 std::string &ConstraintCode = Constraints[i].Codes[0];
2461
2462 MVT::ValueType OpVT;
2463
2464 // Compute the value type for each operand and add it to ConstraintVTs.
2465 switch (Constraints[i].Type) {
2466 case InlineAsm::isOutput:
2467 if (!Constraints[i].isIndirectOutput) {
2468 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2469 OpVT = TLI.getValueType(I.getType());
2470 } else {
2471 const Type *OpTy = I.getOperand(OpNum)->getType();
2472 OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType());
2473 OpNum++; // Consumes a call operand.
2474 }
2475 break;
2476 case InlineAsm::isInput:
2477 OpVT = TLI.getValueType(I.getOperand(OpNum)->getType());
2478 OpNum++; // Consumes a call operand.
2479 break;
2480 case InlineAsm::isClobber:
2481 OpVT = MVT::Other;
2482 break;
2483 }
2484
2485 ConstraintVTs.push_back(OpVT);
2486
2487 if (TLI.getRegForInlineAsmConstraint(ConstraintCode, OpVT).first == 0)
2488 continue; // Not assigned a fixed reg.
2489 2490 // Build a list of regs that this operand uses. This always has a single 2491 // element for promoted/expanded operands. 2492 RegsForValue Regs = GetRegistersForValue(ConstraintCode, OpVT, 2493 false, false, 2494 OutputRegs, InputRegs); 2495 2496 switch (Constraints[i].Type) { 2497 case InlineAsm::isOutput: 2498 // We can't assign any other output to this register. 2499 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2500 // If this is an early-clobber output, it cannot be assigned to the same 2501 // value as the input reg. 2502 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput) 2503 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2504 break; 2505 case InlineAsm::isInput: 2506 // We can't assign any other input to this register. 2507 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2508 break; 2509 case InlineAsm::isClobber: 2510 // Clobbered regs cannot be used as inputs or outputs. 2511 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2512 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2513 break; 2514 } 2515 } 2516 2517 // Loop over all of the inputs, copying the operand values into the 2518 // appropriate registers and processing the output regs. 2519 RegsForValue RetValRegs; 2520 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit; 2521 OpNum = 1; 2522 2523 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) { 2524 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!"); 2525 std::string &ConstraintCode = Constraints[i].Codes[0]; 2526 2527 switch (Constraints[i].Type) { 2528 case InlineAsm::isOutput: { 2529 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass; 2530 if (ConstraintCode.size() == 1) // not a physreg name. 2531 CTy = TLI.getConstraintType(ConstraintCode[0]); 2532 2533 if (CTy == TargetLowering::C_Memory) { 2534 // Memory output. 2535 SDOperand InOperandVal = getValue(I.getOperand(OpNum)); 2536 2537 // Check that the operand (the address to store to) isn't a float. 2538 if (!MVT::isInteger(InOperandVal.getValueType())) 2539 assert(0 && "MATCH FAIL!"); 2540 2541 if (!Constraints[i].isIndirectOutput) 2542 assert(0 && "MATCH FAIL!"); 2543 2544 OpNum++; // Consumes a call operand. 2545 2546 // Extend/truncate to the right pointer type if needed. 2547 MVT::ValueType PtrType = TLI.getPointerTy(); 2548 if (InOperandVal.getValueType() < PtrType) 2549 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal); 2550 else if (InOperandVal.getValueType() > PtrType) 2551 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal); 2552 2553 // Add information to the INLINEASM node to know about this output. 2554 unsigned ResOpType = 4/*MEM*/ | (1 << 3); 2555 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32)); 2556 AsmNodeOperands.push_back(InOperandVal); 2557 break; 2558 } 2559 2560 // Otherwise, this is a register output. 2561 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!"); 2562 2563 // If this is an early-clobber output, or if there is an input 2564 // constraint that matches this, we need to reserve the input register 2565 // so no other inputs allocate to it. 2566 bool UsesInputRegister = false; 2567 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput) 2568 UsesInputRegister = true; 2569 2570 // Copy the output from the appropriate register. Find a register that 2571 // we can use. 
2572 RegsForValue Regs =
2573 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
2574 true, UsesInputRegister,
2575 OutputRegs, InputRegs);
2576 if (Regs.Regs.empty()) {
2577 cerr << "Couldn't allocate output reg for constraint '"
2578 << ConstraintCode << "'!\n";
2579 exit(1);
2580 }
2581
2582 if (!Constraints[i].isIndirectOutput) {
2583 assert(RetValRegs.Regs.empty() &&
2584 "Cannot have multiple output constraints yet!");
2585 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2586 RetValRegs = Regs;
2587 } else {
2588 IndirectStoresToEmit.push_back(std::make_pair(Regs,
2589 I.getOperand(OpNum)));
2590 OpNum++; // Consumes a call operand.
2591 }
2592
2593 // Add information to the INLINEASM node to know that this register is
2594 // set.
2595 Regs.AddInlineAsmOperands(2 /*REGDEF*/, DAG, AsmNodeOperands);
2596 break;
2597 }
2598 case InlineAsm::isInput: {
2599 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
2600 OpNum++; // Consumes a call operand.
2601
2602 if (isdigit(ConstraintCode[0])) { // Matching constraint?
2603 // If this is required to match an output register we have already set,
2604 // just use its register.
2605 unsigned OperandNo = atoi(ConstraintCode.c_str());
2606
2607 // Scan until we find the definition we already emitted of this operand.
2608 // When we find it, create a RegsForValue operand.
2609 unsigned CurOp = 2; // The first operand.
2610 for (; OperandNo; --OperandNo) {
2611 // Advance to the next operand.
2612 unsigned NumOps =
2613 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2614 assert(((NumOps & 7) == 2 /*REGDEF*/ ||
2615 (NumOps & 7) == 4 /*MEM*/) &&
2616 "Skipped past definitions?");
2617 CurOp += (NumOps>>3)+1;
2618 }
2619
2620 unsigned NumOps =
2621 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2622 assert((NumOps & 7) == 2 /*REGDEF*/ &&
2623 "Skipped past definitions?");
2624
2625 // Add NumOps>>3 registers to MatchedRegs.
2626 RegsForValue MatchedRegs;
2627 MatchedRegs.ValueVT = InOperandVal.getValueType();
2628 MatchedRegs.RegVT = AsmNodeOperands[CurOp+1].getValueType();
2629 for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
2630 unsigned Reg=cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
2631 MatchedRegs.Regs.push_back(Reg);
2632 }
2633
2634 // Use the produced MatchedRegs object to copy the input value into the matched registers.
2635 MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag,
2636 TLI.getPointerTy());
2637 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
2638 break;
2639 }
2640
2641 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
2642 if (ConstraintCode.size() == 1) // not a physreg name.
2643 CTy = TLI.getConstraintType(ConstraintCode[0]);
2644
2645 if (CTy == TargetLowering::C_Other) {
2646 InOperandVal = TLI.isOperandValidForConstraint(InOperandVal,
2647 ConstraintCode[0], DAG);
2648 if (!InOperandVal.Val) {
2649 cerr << "Invalid operand for inline asm constraint '"
2650 << ConstraintCode << "'!\n";
2651 exit(1);
2652 }
2653
2654 // Add information to the INLINEASM node to know about this input.
2655 unsigned ResOpType = 3 /*IMM*/ | (1 << 3);
2656 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2657 AsmNodeOperands.push_back(InOperandVal);
2658 break;
2659 } else if (CTy == TargetLowering::C_Memory) {
2660 // Memory input.
2661
2662 // Check that the operand isn't a float.
2663 if (!MVT::isInteger(InOperandVal.getValueType()))
2664 assert(0 && "MATCH FAIL!");
2665
2666 // Extend/truncate to the right pointer type if needed.
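        // Memory operands are passed to the INLINEASM node as pointer-sized
        // values.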
2667 MVT::ValueType PtrType = TLI.getPointerTy(); 2668 if (InOperandVal.getValueType() < PtrType) 2669 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal); 2670 else if (InOperandVal.getValueType() > PtrType) 2671 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal); 2672 2673 // Add information to the INLINEASM node to know about this input. 2674 unsigned ResOpType = 4/*MEM*/ | (1 << 3); 2675 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32)); 2676 AsmNodeOperands.push_back(InOperandVal); 2677 break; 2678 } 2679 2680 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!"); 2681 2682 // Copy the input into the appropriate registers. 2683 RegsForValue InRegs = 2684 GetRegistersForValue(ConstraintCode, ConstraintVTs[i], 2685 false, true, OutputRegs, InputRegs); 2686 // FIXME: should be match fail. 2687 assert(!InRegs.Regs.empty() && "Couldn't allocate input reg!"); 2688 2689 InRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag, TLI.getPointerTy()); 2690 2691 InRegs.AddInlineAsmOperands(1/*REGUSE*/, DAG, AsmNodeOperands); 2692 break; 2693 } 2694 case InlineAsm::isClobber: { 2695 RegsForValue ClobberedRegs = 2696 GetRegistersForValue(ConstraintCode, MVT::Other, false, false, 2697 OutputRegs, InputRegs); 2698 // Add the clobbered value to the operand list, so that the register 2699 // allocator is aware that the physreg got clobbered. 2700 if (!ClobberedRegs.Regs.empty()) 2701 ClobberedRegs.AddInlineAsmOperands(2/*REGDEF*/, DAG, AsmNodeOperands); 2702 break; 2703 } 2704 } 2705 } 2706 2707 // Finish up input operands. 2708 AsmNodeOperands[0] = Chain; 2709 if (Flag.Val) AsmNodeOperands.push_back(Flag); 2710 2711 Chain = DAG.getNode(ISD::INLINEASM, 2712 DAG.getNodeValueTypes(MVT::Other, MVT::Flag), 2, 2713 &AsmNodeOperands[0], AsmNodeOperands.size()); 2714 Flag = Chain.getValue(1); 2715 2716 // If this asm returns a register value, copy the result from that register 2717 // and set it as the value of the call. 2718 if (!RetValRegs.Regs.empty()) 2719 setValue(&I, RetValRegs.getCopyFromRegs(DAG, Chain, Flag)); 2720 2721 std::vector<std::pair<SDOperand, Value*> > StoresToEmit; 2722 2723 // Process indirect outputs, first output all of the flagged copies out of 2724 // physregs. 2725 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) { 2726 RegsForValue &OutRegs = IndirectStoresToEmit[i].first; 2727 Value *Ptr = IndirectStoresToEmit[i].second; 2728 SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, Flag); 2729 StoresToEmit.push_back(std::make_pair(OutVal, Ptr)); 2730 } 2731 2732 // Emit the non-flagged stores from the physregs. 2733 SmallVector<SDOperand, 8> OutChains; 2734 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) 2735 OutChains.push_back(DAG.getStore(Chain, StoresToEmit[i].first, 2736 getValue(StoresToEmit[i].second), 2737 StoresToEmit[i].second, 0)); 2738 if (!OutChains.empty()) 2739 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 2740 &OutChains[0], OutChains.size()); 2741 DAG.setRoot(Chain); 2742} 2743 2744 2745void SelectionDAGLowering::visitMalloc(MallocInst &I) { 2746 SDOperand Src = getValue(I.getOperand(0)); 2747 2748 MVT::ValueType IntPtr = TLI.getPointerTy(); 2749 2750 if (IntPtr < Src.getValueType()) 2751 Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src); 2752 else if (IntPtr > Src.getValueType()) 2753 Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src); 2754 2755 // Scale the source by the type size. 
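  // This gives the call to malloc below the total allocation size in bytes.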
2756 uint64_t ElementSize = TD->getTypeSize(I.getType()->getElementType()); 2757 Src = DAG.getNode(ISD::MUL, Src.getValueType(), 2758 Src, getIntPtrConstant(ElementSize)); 2759 2760 TargetLowering::ArgListTy Args; 2761 TargetLowering::ArgListEntry Entry; 2762 Entry.Node = Src; 2763 Entry.Ty = TLI.getTargetData()->getIntPtrType(); 2764 Entry.isSigned = false; 2765 Args.push_back(Entry); 2766 2767 std::pair<SDOperand,SDOperand> Result = 2768 TLI.LowerCallTo(getRoot(), I.getType(), false, false, CallingConv::C, true, 2769 DAG.getExternalSymbol("malloc", IntPtr), 2770 Args, DAG); 2771 setValue(&I, Result.first); // Pointers always fit in registers 2772 DAG.setRoot(Result.second); 2773} 2774 2775void SelectionDAGLowering::visitFree(FreeInst &I) { 2776 TargetLowering::ArgListTy Args; 2777 TargetLowering::ArgListEntry Entry; 2778 Entry.Node = getValue(I.getOperand(0)); 2779 Entry.Ty = TLI.getTargetData()->getIntPtrType(); 2780 Entry.isSigned = false; 2781 Args.push_back(Entry); 2782 MVT::ValueType IntPtr = TLI.getPointerTy(); 2783 std::pair<SDOperand,SDOperand> Result = 2784 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, CallingConv::C, true, 2785 DAG.getExternalSymbol("free", IntPtr), Args, DAG); 2786 DAG.setRoot(Result.second); 2787} 2788 2789// InsertAtEndOfBasicBlock - This method should be implemented by targets that 2790// mark instructions with the 'usesCustomDAGSchedInserter' flag. These 2791// instructions are special in various ways, which require special support to 2792// insert. The specified MachineInstr is created but not inserted into any 2793// basic blocks, and the scheduler passes ownership of it to this method. 2794MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, 2795 MachineBasicBlock *MBB) { 2796 cerr << "If a target marks an instruction with " 2797 << "'usesCustomDAGSchedInserter', it must implement " 2798 << "TargetLowering::InsertAtEndOfBasicBlock!\n"; 2799 abort(); 2800 return 0; 2801} 2802 2803void SelectionDAGLowering::visitVAStart(CallInst &I) { 2804 DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(), 2805 getValue(I.getOperand(1)), 2806 DAG.getSrcValue(I.getOperand(1)))); 2807} 2808 2809void SelectionDAGLowering::visitVAArg(VAArgInst &I) { 2810 SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(), 2811 getValue(I.getOperand(0)), 2812 DAG.getSrcValue(I.getOperand(0))); 2813 setValue(&I, V); 2814 DAG.setRoot(V.getValue(1)); 2815} 2816 2817void SelectionDAGLowering::visitVAEnd(CallInst &I) { 2818 DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(), 2819 getValue(I.getOperand(1)), 2820 DAG.getSrcValue(I.getOperand(1)))); 2821} 2822 2823void SelectionDAGLowering::visitVACopy(CallInst &I) { 2824 DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(), 2825 getValue(I.getOperand(1)), 2826 getValue(I.getOperand(2)), 2827 DAG.getSrcValue(I.getOperand(1)), 2828 DAG.getSrcValue(I.getOperand(2)))); 2829} 2830 2831/// ExpandScalarFormalArgs - Recursively expand the formal_argument node, either 2832/// bit_convert it or join a pair of them with a BUILD_PAIR when appropriate. 
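/// The index i is advanced past each scalar result value of Arg that is
/// consumed.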
2833static SDOperand ExpandScalarFormalArgs(MVT::ValueType VT, SDNode *Arg, 2834 unsigned &i, SelectionDAG &DAG, 2835 TargetLowering &TLI) { 2836 if (TLI.getTypeAction(VT) != TargetLowering::Expand) 2837 return SDOperand(Arg, i++); 2838 2839 MVT::ValueType EVT = TLI.getTypeToTransformTo(VT); 2840 unsigned NumVals = MVT::getSizeInBits(VT) / MVT::getSizeInBits(EVT); 2841 if (NumVals == 1) { 2842 return DAG.getNode(ISD::BIT_CONVERT, VT, 2843 ExpandScalarFormalArgs(EVT, Arg, i, DAG, TLI)); 2844 } else if (NumVals == 2) { 2845 SDOperand Lo = ExpandScalarFormalArgs(EVT, Arg, i, DAG, TLI); 2846 SDOperand Hi = ExpandScalarFormalArgs(EVT, Arg, i, DAG, TLI); 2847 if (!TLI.isLittleEndian()) 2848 std::swap(Lo, Hi); 2849 return DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi); 2850 } else { 2851 // Value scalarized into many values. Unimp for now. 2852 assert(0 && "Cannot expand i64 -> i16 yet!"); 2853 } 2854 return SDOperand(); 2855} 2856 2857/// TargetLowering::LowerArguments - This is the default LowerArguments 2858/// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all 2859/// targets are migrated to using FORMAL_ARGUMENTS, this hook should be 2860/// integrated into SDISel. 2861std::vector<SDOperand> 2862TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { 2863 // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node. 2864 std::vector<SDOperand> Ops; 2865 Ops.push_back(DAG.getRoot()); 2866 Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy())); 2867 Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy())); 2868 2869 // Add one result value for each formal argument. 2870 std::vector<MVT::ValueType> RetVals; 2871 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) { 2872 MVT::ValueType VT = getValueType(I->getType()); 2873 2874 switch (getTypeAction(VT)) { 2875 default: assert(0 && "Unknown type action!"); 2876 case Legal: 2877 RetVals.push_back(VT); 2878 break; 2879 case Promote: 2880 RetVals.push_back(getTypeToTransformTo(VT)); 2881 break; 2882 case Expand: 2883 if (VT != MVT::Vector) { 2884 // If this is a large integer, it needs to be broken up into small 2885 // integers. Figure out what the destination type is and how many small 2886 // integers it turns into. 2887 MVT::ValueType NVT = getTypeToExpandTo(VT); 2888 unsigned NumVals = getNumElements(VT); 2889 for (unsigned i = 0; i != NumVals; ++i) 2890 RetVals.push_back(NVT); 2891 } else { 2892 // Otherwise, this is a vector type. We only support legal vectors 2893 // right now. 2894 unsigned NumElems = cast<PackedType>(I->getType())->getNumElements(); 2895 const Type *EltTy = cast<PackedType>(I->getType())->getElementType(); 2896 2897 // Figure out if there is a Packed type corresponding to this Vector 2898 // type. If so, convert to the packed type. 2899 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 2900 if (TVT != MVT::Other && isTypeLegal(TVT)) { 2901 RetVals.push_back(TVT); 2902 } else { 2903 assert(0 && "Don't support illegal by-val vector arguments yet!"); 2904 } 2905 } 2906 break; 2907 } 2908 } 2909 2910 RetVals.push_back(MVT::Other); 2911 2912 // Create the node. 2913 SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS, 2914 DAG.getNodeValueTypes(RetVals), RetVals.size(), 2915 &Ops[0], Ops.size()).Val; 2916 2917 DAG.setRoot(SDOperand(Result, Result->getNumValues()-1)); 2918 2919 // Set up the return result vector. 
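  // One result value is produced per formal argument; promoted values are
  // truncated or rounded back to their declared LLVM type.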
2920 Ops.clear(); 2921 const FunctionType *FTy = F.getFunctionType(); 2922 unsigned i = 0; 2923 unsigned Idx = 1; 2924 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; 2925 ++I, ++Idx) { 2926 MVT::ValueType VT = getValueType(I->getType()); 2927 2928 switch (getTypeAction(VT)) { 2929 default: assert(0 && "Unknown type action!"); 2930 case Legal: 2931 Ops.push_back(SDOperand(Result, i++)); 2932 break; 2933 case Promote: { 2934 SDOperand Op(Result, i++); 2935 if (MVT::isInteger(VT)) { 2936 if (FTy->paramHasAttr(Idx, FunctionType::SExtAttribute)) 2937 Op = DAG.getNode(ISD::AssertSext, Op.getValueType(), Op, 2938 DAG.getValueType(VT)); 2939 else if (FTy->paramHasAttr(Idx, FunctionType::ZExtAttribute)) 2940 Op = DAG.getNode(ISD::AssertZext, Op.getValueType(), Op, 2941 DAG.getValueType(VT)); 2942 Op = DAG.getNode(ISD::TRUNCATE, VT, Op); 2943 } else { 2944 assert(MVT::isFloatingPoint(VT) && "Not int or FP?"); 2945 Op = DAG.getNode(ISD::FP_ROUND, VT, Op); 2946 } 2947 Ops.push_back(Op); 2948 break; 2949 } 2950 case Expand: 2951 if (VT != MVT::Vector) { 2952 // If this is a large integer or a floating point node that needs to be 2953 // expanded, it needs to be reassembled from small integers. Figure out 2954 // what the source elt type is and how many small integers it is. 2955 Ops.push_back(ExpandScalarFormalArgs(VT, Result, i, DAG, *this)); 2956 } else { 2957 // Otherwise, this is a vector type. We only support legal vectors 2958 // right now. 2959 const PackedType *PTy = cast<PackedType>(I->getType()); 2960 unsigned NumElems = PTy->getNumElements(); 2961 const Type *EltTy = PTy->getElementType(); 2962 2963 // Figure out if there is a Packed type corresponding to this Vector 2964 // type. If so, convert to the packed type. 2965 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 2966 if (TVT != MVT::Other && isTypeLegal(TVT)) { 2967 SDOperand N = SDOperand(Result, i++); 2968 // Handle copies from generic vectors to registers. 2969 N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N, 2970 DAG.getConstant(NumElems, MVT::i32), 2971 DAG.getValueType(getValueType(EltTy))); 2972 Ops.push_back(N); 2973 } else { 2974 assert(0 && "Don't support illegal by-val vector arguments yet!"); 2975 abort(); 2976 } 2977 } 2978 break; 2979 } 2980 } 2981 return Ops; 2982} 2983 2984 2985/// ExpandScalarCallArgs - Recursively expand call argument node by 2986/// bit_converting it or extract a pair of elements from the larger node. 
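/// For example, an i64 call argument on a target that Expands i64 to i32 is
/// split with two EXTRACT_ELEMENT nodes into its low and high i32 halves, and
/// each half is pushed onto the operand list with its own signedness marker.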
2987static void ExpandScalarCallArgs(MVT::ValueType VT, SDOperand Arg, 2988 bool isSigned, 2989 SmallVector<SDOperand, 32> &Ops, 2990 SelectionDAG &DAG, 2991 TargetLowering &TLI) { 2992 if (TLI.getTypeAction(VT) != TargetLowering::Expand) { 2993 Ops.push_back(Arg); 2994 Ops.push_back(DAG.getConstant(isSigned, MVT::i32)); 2995 return; 2996 } 2997 2998 MVT::ValueType EVT = TLI.getTypeToTransformTo(VT); 2999 unsigned NumVals = MVT::getSizeInBits(VT) / MVT::getSizeInBits(EVT); 3000 if (NumVals == 1) { 3001 Arg = DAG.getNode(ISD::BIT_CONVERT, EVT, Arg); 3002 ExpandScalarCallArgs(EVT, Arg, isSigned, Ops, DAG, TLI); 3003 } else if (NumVals == 2) { 3004 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, EVT, Arg, 3005 DAG.getConstant(0, TLI.getPointerTy())); 3006 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, EVT, Arg, 3007 DAG.getConstant(1, TLI.getPointerTy())); 3008 if (!TLI.isLittleEndian()) 3009 std::swap(Lo, Hi); 3010 ExpandScalarCallArgs(EVT, Lo, isSigned, Ops, DAG, TLI); 3011 ExpandScalarCallArgs(EVT, Hi, isSigned, Ops, DAG, TLI); 3012 } else { 3013 // Value scalarized into many values. Unimp for now. 3014 assert(0 && "Cannot expand i64 -> i16 yet!"); 3015 } 3016} 3017 3018/// TargetLowering::LowerCallTo - This is the default LowerCallTo 3019/// implementation, which just inserts an ISD::CALL node, which is later custom 3020/// lowered by the target to something concrete. FIXME: When all targets are 3021/// migrated to using ISD::CALL, this hook should be integrated into SDISel. 3022std::pair<SDOperand, SDOperand> 3023TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, 3024 bool RetTyIsSigned, bool isVarArg, 3025 unsigned CallingConv, bool isTailCall, 3026 SDOperand Callee, 3027 ArgListTy &Args, SelectionDAG &DAG) { 3028 SmallVector<SDOperand, 32> Ops; 3029 Ops.push_back(Chain); // Op#0 - Chain 3030 Ops.push_back(DAG.getConstant(CallingConv, getPointerTy())); // Op#1 - CC 3031 Ops.push_back(DAG.getConstant(isVarArg, getPointerTy())); // Op#2 - VarArg 3032 Ops.push_back(DAG.getConstant(isTailCall, getPointerTy())); // Op#3 - Tail 3033 Ops.push_back(Callee); 3034 3035 // Handle all of the outgoing arguments. 3036 for (unsigned i = 0, e = Args.size(); i != e; ++i) { 3037 MVT::ValueType VT = getValueType(Args[i].Ty); 3038 SDOperand Op = Args[i].Node; 3039 bool isSigned = Args[i].isSigned; 3040 switch (getTypeAction(VT)) { 3041 default: assert(0 && "Unknown type action!"); 3042 case Legal: 3043 Ops.push_back(Op); 3044 Ops.push_back(DAG.getConstant(isSigned, MVT::i32)); 3045 break; 3046 case Promote: 3047 if (MVT::isInteger(VT)) { 3048 unsigned ExtOp = isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 3049 Op = DAG.getNode(ExtOp, getTypeToTransformTo(VT), Op); 3050 } else { 3051 assert(MVT::isFloatingPoint(VT) && "Not int or FP?"); 3052 Op = DAG.getNode(ISD::FP_EXTEND, getTypeToTransformTo(VT), Op); 3053 } 3054 Ops.push_back(Op); 3055 Ops.push_back(DAG.getConstant(isSigned, MVT::i32)); 3056 break; 3057 case Expand: 3058 if (VT != MVT::Vector) { 3059 // If this is a large integer, it needs to be broken down into small 3060 // integers. Figure out what the source elt type is and how many small 3061 // integers it is. 3062 ExpandScalarCallArgs(VT, Op, isSigned, Ops, DAG, *this); 3063 } else { 3064 // Otherwise, this is a vector type. We only support legal vectors 3065 // right now. 
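        // For example, a <4 x float> argument is passed as a single v4f32
        // operand when the target has v4f32 as a legal type; illegal by-val
        // vectors are rejected below.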
3066 const PackedType *PTy = cast<PackedType>(Args[i].Ty); 3067 unsigned NumElems = PTy->getNumElements(); 3068 const Type *EltTy = PTy->getElementType(); 3069 3070 // Figure out if there is a Packed type corresponding to this Vector 3071 // type. If so, convert to the packed type. 3072 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 3073 if (TVT != MVT::Other && isTypeLegal(TVT)) { 3074 // Insert a VBIT_CONVERT of the MVT::Vector type to the packed type. 3075 Op = DAG.getNode(ISD::VBIT_CONVERT, TVT, Op); 3076 Ops.push_back(Op); 3077 Ops.push_back(DAG.getConstant(isSigned, MVT::i32)); 3078 } else { 3079 assert(0 && "Don't support illegal by-val vector call args yet!"); 3080 abort(); 3081 } 3082 } 3083 break; 3084 } 3085 } 3086 3087 // Figure out the result value types. 3088 SmallVector<MVT::ValueType, 4> RetTys; 3089 3090 if (RetTy != Type::VoidTy) { 3091 MVT::ValueType VT = getValueType(RetTy); 3092 switch (getTypeAction(VT)) { 3093 default: assert(0 && "Unknown type action!"); 3094 case Legal: 3095 RetTys.push_back(VT); 3096 break; 3097 case Promote: 3098 RetTys.push_back(getTypeToTransformTo(VT)); 3099 break; 3100 case Expand: 3101 if (VT != MVT::Vector) { 3102 // If this is a large integer, it needs to be reassembled from small 3103 // integers. Figure out what the source elt type is and how many small 3104 // integers it is. 3105 MVT::ValueType NVT = getTypeToExpandTo(VT); 3106 unsigned NumVals = getNumElements(VT); 3107 for (unsigned i = 0; i != NumVals; ++i) 3108 RetTys.push_back(NVT); 3109 } else { 3110 // Otherwise, this is a vector type. We only support legal vectors 3111 // right now. 3112 const PackedType *PTy = cast<PackedType>(RetTy); 3113 unsigned NumElems = PTy->getNumElements(); 3114 const Type *EltTy = PTy->getElementType(); 3115 3116 // Figure out if there is a Packed type corresponding to this Vector 3117 // type. If so, convert to the packed type. 3118 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 3119 if (TVT != MVT::Other && isTypeLegal(TVT)) { 3120 RetTys.push_back(TVT); 3121 } else { 3122 assert(0 && "Don't support illegal by-val vector call results yet!"); 3123 abort(); 3124 } 3125 } 3126 } 3127 } 3128 3129 RetTys.push_back(MVT::Other); // Always has a chain. 3130 3131 // Finally, create the CALL node. 3132 SDOperand Res = DAG.getNode(ISD::CALL, 3133 DAG.getVTList(&RetTys[0], RetTys.size()), 3134 &Ops[0], Ops.size()); 3135 3136 // This returns a pair of operands. The first element is the 3137 // return value for the function (if RetTy is not VoidTy). The second 3138 // element is the outgoing token chain. 3139 SDOperand ResVal; 3140 if (RetTys.size() != 1) { 3141 MVT::ValueType VT = getValueType(RetTy); 3142 if (RetTys.size() == 2) { 3143 ResVal = Res; 3144 3145 // If this value was promoted, truncate it down. 3146 if (ResVal.getValueType() != VT) { 3147 if (VT == MVT::Vector) { 3148 // Insert a VBITCONVERT to convert from the packed result type to the 3149 // MVT::Vector type. 3150 unsigned NumElems = cast<PackedType>(RetTy)->getNumElements(); 3151 const Type *EltTy = cast<PackedType>(RetTy)->getElementType(); 3152 3153 // Figure out if there is a Packed type corresponding to this Vector 3154 // type. If so, convert to the packed type. 3155 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 3156 if (TVT != MVT::Other && isTypeLegal(TVT)) { 3157 // Insert a VBIT_CONVERT of the FORMAL_ARGUMENTS to a 3158 // "N x PTyElementVT" MVT::Vector type. 
3159 ResVal = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, ResVal, 3160 DAG.getConstant(NumElems, MVT::i32), 3161 DAG.getValueType(getValueType(EltTy))); 3162 } else { 3163 abort(); 3164 } 3165 } else if (MVT::isInteger(VT)) { 3166 unsigned AssertOp = ISD::AssertSext; 3167 if (!RetTyIsSigned) 3168 AssertOp = ISD::AssertZext; 3169 ResVal = DAG.getNode(AssertOp, ResVal.getValueType(), ResVal, 3170 DAG.getValueType(VT)); 3171 ResVal = DAG.getNode(ISD::TRUNCATE, VT, ResVal); 3172 } else { 3173 assert(MVT::isFloatingPoint(VT)); 3174 if (getTypeAction(VT) == Expand) 3175 ResVal = DAG.getNode(ISD::BIT_CONVERT, VT, ResVal); 3176 else 3177 ResVal = DAG.getNode(ISD::FP_ROUND, VT, ResVal); 3178 } 3179 } 3180 } else if (RetTys.size() == 3) { 3181 ResVal = DAG.getNode(ISD::BUILD_PAIR, VT, 3182 Res.getValue(0), Res.getValue(1)); 3183 3184 } else { 3185 assert(0 && "Case not handled yet!"); 3186 } 3187 } 3188 3189 return std::make_pair(ResVal, Res.getValue(Res.Val->getNumValues()-1)); 3190} 3191 3192 3193 3194// It is always conservatively correct for llvm.returnaddress and 3195// llvm.frameaddress to return 0. 3196// 3197// FIXME: Change this to insert a FRAMEADDR/RETURNADDR node, and have that be 3198// expanded to 0 if the target wants. 3199std::pair<SDOperand, SDOperand> 3200TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, 3201 unsigned Depth, SelectionDAG &DAG) { 3202 return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain); 3203} 3204 3205SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 3206 assert(0 && "LowerOperation not implemented for this target!"); 3207 abort(); 3208 return SDOperand(); 3209} 3210 3211SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op, 3212 SelectionDAG &DAG) { 3213 assert(0 && "CustomPromoteOperation not implemented for this target!"); 3214 abort(); 3215 return SDOperand(); 3216} 3217 3218void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) { 3219 unsigned Depth = (unsigned)cast<ConstantInt>(I.getOperand(1))->getZExtValue(); 3220 std::pair<SDOperand,SDOperand> Result = 3221 TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG); 3222 setValue(&I, Result.first); 3223 DAG.setRoot(Result.second); 3224} 3225 3226/// getMemsetValue - Vectorized representation of the memset value 3227/// operand. 3228static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT, 3229 SelectionDAG &DAG) { 3230 MVT::ValueType CurVT = VT; 3231 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 3232 uint64_t Val = C->getValue() & 255; 3233 unsigned Shift = 8; 3234 while (CurVT != MVT::i8) { 3235 Val = (Val << Shift) | Val; 3236 Shift <<= 1; 3237 CurVT = (MVT::ValueType)((unsigned)CurVT - 1); 3238 } 3239 return DAG.getConstant(Val, VT); 3240 } else { 3241 Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value); 3242 unsigned Shift = 8; 3243 while (CurVT != MVT::i8) { 3244 Value = 3245 DAG.getNode(ISD::OR, VT, 3246 DAG.getNode(ISD::SHL, VT, Value, 3247 DAG.getConstant(Shift, MVT::i8)), Value); 3248 Shift <<= 1; 3249 CurVT = (MVT::ValueType)((unsigned)CurVT - 1); 3250 } 3251 3252 return Value; 3253 } 3254} 3255 3256/// getMemsetStringVal - Similar to getMemsetValue. Except this is only 3257/// used when a memcpy is turned into a memset when the source is a constant 3258/// string ptr. 
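/// For example, packing the constant string "abcd" at Offset 0 into an i32 on
/// a little-endian target yields the constant 0x64636261, which stores back to
/// memory as the bytes 'a','b','c','d'.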
3259static SDOperand getMemsetStringVal(MVT::ValueType VT, 3260 SelectionDAG &DAG, TargetLowering &TLI, 3261 std::string &Str, unsigned Offset) { 3262 uint64_t Val = 0; 3263 unsigned MSB = getSizeInBits(VT) / 8; 3264 if (TLI.isLittleEndian()) 3265 Offset = Offset + MSB - 1; 3266 for (unsigned i = 0; i != MSB; ++i) { 3267 Val = (Val << 8) | (unsigned char)Str[Offset]; 3268 Offset += TLI.isLittleEndian() ? -1 : 1; 3269 } 3270 return DAG.getConstant(Val, VT); 3271} 3272 3273/// getMemBasePlusOffset - Returns base and offset node for the 3274static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset, 3275 SelectionDAG &DAG, TargetLowering &TLI) { 3276 MVT::ValueType VT = Base.getValueType(); 3277 return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT)); 3278} 3279 3280/// MeetsMaxMemopRequirement - Determines if the number of memory ops required 3281/// to replace the memset / memcpy is below the threshold. It also returns the 3282/// types of the sequence of memory ops to perform memset / memcpy. 3283static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps, 3284 unsigned Limit, uint64_t Size, 3285 unsigned Align, TargetLowering &TLI) { 3286 MVT::ValueType VT; 3287 3288 if (TLI.allowsUnalignedMemoryAccesses()) { 3289 VT = MVT::i64; 3290 } else { 3291 switch (Align & 7) { 3292 case 0: 3293 VT = MVT::i64; 3294 break; 3295 case 4: 3296 VT = MVT::i32; 3297 break; 3298 case 2: 3299 VT = MVT::i16; 3300 break; 3301 default: 3302 VT = MVT::i8; 3303 break; 3304 } 3305 } 3306 3307 MVT::ValueType LVT = MVT::i64; 3308 while (!TLI.isTypeLegal(LVT)) 3309 LVT = (MVT::ValueType)((unsigned)LVT - 1); 3310 assert(MVT::isInteger(LVT)); 3311 3312 if (VT > LVT) 3313 VT = LVT; 3314 3315 unsigned NumMemOps = 0; 3316 while (Size != 0) { 3317 unsigned VTSize = getSizeInBits(VT) / 8; 3318 while (VTSize > Size) { 3319 VT = (MVT::ValueType)((unsigned)VT - 1); 3320 VTSize >>= 1; 3321 } 3322 assert(MVT::isInteger(VT)); 3323 3324 if (++NumMemOps > Limit) 3325 return false; 3326 MemOps.push_back(VT); 3327 Size -= VTSize; 3328 } 3329 3330 return true; 3331} 3332 3333void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) { 3334 SDOperand Op1 = getValue(I.getOperand(1)); 3335 SDOperand Op2 = getValue(I.getOperand(2)); 3336 SDOperand Op3 = getValue(I.getOperand(3)); 3337 SDOperand Op4 = getValue(I.getOperand(4)); 3338 unsigned Align = (unsigned)cast<ConstantSDNode>(Op4)->getValue(); 3339 if (Align == 0) Align = 1; 3340 3341 if (ConstantSDNode *Size = dyn_cast<ConstantSDNode>(Op3)) { 3342 std::vector<MVT::ValueType> MemOps; 3343 3344 // Expand memset / memcpy to a series of load / store ops 3345 // if the size operand falls below a certain threshold. 3346 SmallVector<SDOperand, 8> OutChains; 3347 switch (Op) { 3348 default: break; // Do nothing for now. 
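    // For illustration: memset(p, c, 7) with 4-byte alignment expands (when
    // i32 is legal and the per-memset store limit allows it) into an i32
    // store, an i16 store, and an i8 store of the replicated byte value; the
    // stores are chained together with a TokenFactor below.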
3349 case ISD::MEMSET: { 3350 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(), 3351 Size->getValue(), Align, TLI)) { 3352 unsigned NumMemOps = MemOps.size(); 3353 unsigned Offset = 0; 3354 for (unsigned i = 0; i < NumMemOps; i++) { 3355 MVT::ValueType VT = MemOps[i]; 3356 unsigned VTSize = getSizeInBits(VT) / 8; 3357 SDOperand Value = getMemsetValue(Op2, VT, DAG); 3358 SDOperand Store = DAG.getStore(getRoot(), Value, 3359 getMemBasePlusOffset(Op1, Offset, DAG, TLI), 3360 I.getOperand(1), Offset); 3361 OutChains.push_back(Store); 3362 Offset += VTSize; 3363 } 3364 } 3365 break; 3366 } 3367 case ISD::MEMCPY: { 3368 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemcpy(), 3369 Size->getValue(), Align, TLI)) { 3370 unsigned NumMemOps = MemOps.size(); 3371 unsigned SrcOff = 0, DstOff = 0, SrcDelta = 0; 3372 GlobalAddressSDNode *G = NULL; 3373 std::string Str; 3374 bool CopyFromStr = false; 3375 3376 if (Op2.getOpcode() == ISD::GlobalAddress) 3377 G = cast<GlobalAddressSDNode>(Op2); 3378 else if (Op2.getOpcode() == ISD::ADD && 3379 Op2.getOperand(0).getOpcode() == ISD::GlobalAddress && 3380 Op2.getOperand(1).getOpcode() == ISD::Constant) { 3381 G = cast<GlobalAddressSDNode>(Op2.getOperand(0)); 3382 SrcDelta = cast<ConstantSDNode>(Op2.getOperand(1))->getValue(); 3383 } 3384 if (G) { 3385 GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal()); 3386 if (GV && GV->isConstant()) { 3387 Str = GV->getStringValue(false); 3388 if (!Str.empty()) { 3389 CopyFromStr = true; 3390 SrcOff += SrcDelta; 3391 } 3392 } 3393 } 3394 3395 for (unsigned i = 0; i < NumMemOps; i++) { 3396 MVT::ValueType VT = MemOps[i]; 3397 unsigned VTSize = getSizeInBits(VT) / 8; 3398 SDOperand Value, Chain, Store; 3399 3400 if (CopyFromStr) { 3401 Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff); 3402 Chain = getRoot(); 3403 Store = 3404 DAG.getStore(Chain, Value, 3405 getMemBasePlusOffset(Op1, DstOff, DAG, TLI), 3406 I.getOperand(1), DstOff); 3407 } else { 3408 Value = DAG.getLoad(VT, getRoot(), 3409 getMemBasePlusOffset(Op2, SrcOff, DAG, TLI), 3410 I.getOperand(2), SrcOff); 3411 Chain = Value.getValue(1); 3412 Store = 3413 DAG.getStore(Chain, Value, 3414 getMemBasePlusOffset(Op1, DstOff, DAG, TLI), 3415 I.getOperand(1), DstOff); 3416 } 3417 OutChains.push_back(Store); 3418 SrcOff += VTSize; 3419 DstOff += VTSize; 3420 } 3421 } 3422 break; 3423 } 3424 } 3425 3426 if (!OutChains.empty()) { 3427 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, 3428 &OutChains[0], OutChains.size())); 3429 return; 3430 } 3431 } 3432 3433 DAG.setRoot(DAG.getNode(Op, MVT::Other, getRoot(), Op1, Op2, Op3, Op4)); 3434} 3435 3436//===----------------------------------------------------------------------===// 3437// SelectionDAGISel code 3438//===----------------------------------------------------------------------===// 3439 3440unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) { 3441 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT)); 3442} 3443 3444void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const { 3445 // FIXME: we only modify the CFG to split critical edges. This 3446 // updates dom and loop info. 3447 AU.addRequired<AliasAnalysis>(); 3448} 3449 3450 3451/// OptimizeNoopCopyExpression - We have determined that the specified cast 3452/// instruction is a noop copy (e.g. it's casting from one pointer type to 3453/// another, int->uint, or int->sbyte on PPC. 3454/// 3455/// Return true if any changes are made. 
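/// The rewrite below sinks a copy of the cast into each block that uses it, so
/// that basic-block-at-a-time selection sees the cast next to its use and only
/// the cast operand stays live across blocks.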
3456 static bool OptimizeNoopCopyExpression(CastInst *CI) {
3457   BasicBlock *DefBB = CI->getParent();
3458
3459   /// InsertedCasts - Only insert a cast in each block once.
3460   std::map<BasicBlock*, CastInst*> InsertedCasts;
3461
3462   bool MadeChange = false;
3463   for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
3464        UI != E; ) {
3465     Use &TheUse = UI.getUse();
3466     Instruction *User = cast<Instruction>(*UI);
3467
3468     // Figure out which BB this cast is used in.  For PHI's this is the
3469     // appropriate predecessor block.
3470     BasicBlock *UserBB = User->getParent();
3471     if (PHINode *PN = dyn_cast<PHINode>(User)) {
3472       unsigned OpVal = UI.getOperandNo()/2;
3473       UserBB = PN->getIncomingBlock(OpVal);
3474     }
3475
3476     // Preincrement use iterator so we don't invalidate it.
3477     ++UI;
3478
3479     // If this user is in the same block as the cast, don't change the cast.
3480     if (UserBB == DefBB) continue;
3481
3482     // If we have already inserted a cast into this block, use it.
3483     CastInst *&InsertedCast = InsertedCasts[UserBB];
3484
3485     if (!InsertedCast) {
3486       BasicBlock::iterator InsertPt = UserBB->begin();
3487       while (isa<PHINode>(InsertPt)) ++InsertPt;
3488
3489       InsertedCast =
3490         CastInst::create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
3491                          InsertPt);
3492       MadeChange = true;
3493     }
3494
3495     // Replace a use of the cast with a use of the new cast.
3496     TheUse = InsertedCast;
3497   }
3498
3499   // If we removed all uses, nuke the cast.
3500   if (CI->use_empty())
3501     CI->eraseFromParent();
3502
3503   return MadeChange;
3504 }
3505
3506 /// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
3507 /// casting to the type of GEPI.
3508 static Instruction *InsertGEPComputeCode(Instruction *&V, BasicBlock *BB,
3509                                          Instruction *GEPI, Value *Ptr,
3510                                          Value *PtrOffset) {
3511   if (V) return V;   // Already computed.
3512
3513   // Figure out the insertion point
3514   BasicBlock::iterator InsertPt;
3515   if (BB == GEPI->getParent()) {
3516     // If GEP is already inserted into BB, insert right after the GEP.
3517     InsertPt = GEPI;
3518     ++InsertPt;
3519   } else {
3520     // Otherwise, insert at the top of BB, after any PHI nodes
3521     InsertPt = BB->begin();
3522     while (isa<PHINode>(InsertPt)) ++InsertPt;
3523   }
3524
3525   // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into
3526   // BB so that there is only one value live across basic blocks (the cast
3527   // operand).
3528   if (CastInst *CI = dyn_cast<CastInst>(Ptr))
3529     if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
3530       Ptr = CastInst::create(CI->getOpcode(), CI->getOperand(0), CI->getType(),
3531                              "", InsertPt);
3532
3533   // Add the offset, cast it to the right type.
3534   Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
3535   // Ptr is an integer type, GEPI is pointer type ==> IntToPtr
3536   return V = CastInst::create(Instruction::IntToPtr, Ptr, GEPI->getType(),
3537                               "", InsertPt);
3538 }
3539
3540 /// ReplaceUsesOfGEPInst - Replace all uses of RepPtr with inserted code to
3541 /// compute its value.  The RepPtr value can be computed with Ptr+PtrOffset. One
3542 /// trivial way of doing this would be to evaluate Ptr+PtrOffset in RepPtr's
3543 /// block, then ReplaceAllUsesWith'ing everything.  However, we would prefer to
3544 /// sink PtrOffset into user blocks where doing so will likely allow us to fold
3545 /// the constant add into a load or store instruction.  Additionally, if a user
3546 /// is a pointer-pointer cast, we look through it to find its users.
3547static void ReplaceUsesOfGEPInst(Instruction *RepPtr, Value *Ptr, 3548 Constant *PtrOffset, BasicBlock *DefBB, 3549 GetElementPtrInst *GEPI, 3550 std::map<BasicBlock*,Instruction*> &InsertedExprs) { 3551 while (!RepPtr->use_empty()) { 3552 Instruction *User = cast<Instruction>(RepPtr->use_back()); 3553 3554 // If the user is a Pointer-Pointer cast, recurse. Only BitCast can be 3555 // used for a Pointer-Pointer cast. 3556 if (isa<BitCastInst>(User)) { 3557 ReplaceUsesOfGEPInst(User, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs); 3558 3559 // Drop the use of RepPtr. The cast is dead. Don't delete it now, else we 3560 // could invalidate an iterator. 3561 User->setOperand(0, UndefValue::get(RepPtr->getType())); 3562 continue; 3563 } 3564 3565 // If this is a load of the pointer, or a store through the pointer, emit 3566 // the increment into the load/store block. 3567 Instruction *NewVal; 3568 if (isa<LoadInst>(User) || 3569 (isa<StoreInst>(User) && User->getOperand(0) != RepPtr)) { 3570 NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()], 3571 User->getParent(), GEPI, 3572 Ptr, PtrOffset); 3573 } else { 3574 // If this use is not foldable into the addressing mode, use a version 3575 // emitted in the GEP block. 3576 NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI, 3577 Ptr, PtrOffset); 3578 } 3579 3580 if (GEPI->getType() != RepPtr->getType()) { 3581 BasicBlock::iterator IP = NewVal; 3582 ++IP; 3583 // NewVal must be a GEP which must be pointer type, so BitCast 3584 NewVal = new BitCastInst(NewVal, RepPtr->getType(), "", IP); 3585 } 3586 User->replaceUsesOfWith(RepPtr, NewVal); 3587 } 3588} 3589 3590 3591/// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction 3592/// selection, we want to be a bit careful about some things. In particular, if 3593/// we have a GEP instruction that is used in a different block than it is 3594/// defined, the addressing expression of the GEP cannot be folded into loads or 3595/// stores that use it. In this case, decompose the GEP and move constant 3596/// indices into blocks that use it. 3597static bool OptimizeGEPExpression(GetElementPtrInst *GEPI, 3598 const TargetData *TD) { 3599 // If this GEP is only used inside the block it is defined in, there is no 3600 // need to rewrite it. 3601 bool isUsedOutsideDefBB = false; 3602 BasicBlock *DefBB = GEPI->getParent(); 3603 for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end(); 3604 UI != E; ++UI) { 3605 if (cast<Instruction>(*UI)->getParent() != DefBB) { 3606 isUsedOutsideDefBB = true; 3607 break; 3608 } 3609 } 3610 if (!isUsedOutsideDefBB) return false; 3611 3612 // If this GEP has no non-zero constant indices, there is nothing we can do, 3613 // ignore it. 3614 bool hasConstantIndex = false; 3615 bool hasVariableIndex = false; 3616 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1, 3617 E = GEPI->op_end(); OI != E; ++OI) { 3618 if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI)) { 3619 if (CI->getZExtValue()) { 3620 hasConstantIndex = true; 3621 break; 3622 } 3623 } else { 3624 hasVariableIndex = true; 3625 } 3626 } 3627 3628 // If this is a "GEP X, 0, 0, 0", turn this into a cast. 
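  // (A GEP whose indices are all zero does not change the address; e.g.
  // "getelementptr %p, 0, 0" computes the same pointer as %p, so it can be
  // replaced by a bitcast to the GEP's result type.)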
3629   if (!hasConstantIndex && !hasVariableIndex) {
3630     /// The GEP operand must be a pointer, so must its result -> BitCast
3631     Value *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
3632                                 GEPI->getName(), GEPI);
3633     GEPI->replaceAllUsesWith(NC);
3634     GEPI->eraseFromParent();
3635     return true;
3636   }
3637
3638   // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
3639   if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0)))
3640     return false;
3641
3642   // Otherwise, decompose the GEP instruction into multiplies and adds.  Sum the
3643   // constant offset (which we now know is non-zero) and deal with it later.
3644   uint64_t ConstantOffset = 0;
3645   const Type *UIntPtrTy = TD->getIntPtrType();
3646   Value *Ptr = new PtrToIntInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
3647   const Type *Ty = GEPI->getOperand(0)->getType();
3648
3649   for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
3650        E = GEPI->op_end(); OI != E; ++OI) {
3651     Value *Idx = *OI;
3652     if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
3653       unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
3654       if (Field)
3655         ConstantOffset += TD->getStructLayout(StTy)->MemberOffsets[Field];
3656       Ty = StTy->getElementType(Field);
3657     } else {
3658       Ty = cast<SequentialType>(Ty)->getElementType();
3659
3660       // Handle constant subscripts.
3661       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
3662         if (CI->getZExtValue() == 0) continue;
3663         ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CI->getSExtValue();
3664         continue;
3665       }
3666
3667       // Ptr = Ptr + Idx * ElementSize;
3668
3669       // Cast Idx to UIntPtrTy if needed.
3670       Idx = CastInst::createIntegerCast(Idx, UIntPtrTy, true/*SExt*/, "", GEPI);
3671
3672       uint64_t ElementSize = TD->getTypeSize(Ty);
3673       // Mask off bits that should not be set.
3674       ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
3675       Constant *SizeCst = ConstantInt::get(UIntPtrTy, ElementSize);
3676
3677       // Multiply by the element size and add to the base.
3678       Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
3679       Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);
3680     }
3681   }
3682
3683   // Make sure that the offset fits in uintptr_t.
3684   ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
3685   Constant *PtrOffset = ConstantInt::get(UIntPtrTy, ConstantOffset);
3686
3687   // Okay, we have now emitted all of the variable index parts to the BB that
3688   // the GEP is defined in.  Loop over all of the using instructions, inserting
3689   // an "add Ptr, ConstantOffset" into each block that uses it, updating the
3690   // instruction to use the newly computed value and making GEPI dead.  When the
3691   // user is a load or store instruction address, we emit the add into the user
3692   // block, otherwise we use a canonical version right next to the gep (these
3693   // won't be foldable as addresses, so we might as well share the computation).
3694
3695   std::map<BasicBlock*,Instruction*> InsertedExprs;
3696   ReplaceUsesOfGEPInst(GEPI, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
3697
3698   // Finally, the GEP is dead, remove it.
3699   GEPI->eraseFromParent();
3700
3701   return true;
3702 }
3703
3704
3705 /// SplitEdgeNicely - Split the critical edge from TI to its specified
3706 /// successor if it will improve codegen.  We only do this if the successor has
3707 /// phi nodes (otherwise critical edges are ok).  If there is already another
3708 /// predecessor of the succ that is empty (and thus has no phi nodes), use it
3709 /// instead of introducing a new block.
3710 static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum, Pass *P) {
3711   BasicBlock *TIBB = TI->getParent();
3712   BasicBlock *Dest = TI->getSuccessor(SuccNum);
3713   assert(isa<PHINode>(Dest->begin()) &&
3714          "This should only be called if Dest has a PHI!");
3715
3716   /// TIPHIValues - This array is lazily computed to determine the values of
3717   /// PHIs in Dest that TI would provide.
3718   std::vector<Value*> TIPHIValues;
3719
3720   // Check to see if Dest has any blocks that can be used as a split edge for
3721   // this terminator.
3722   for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) {
3723     BasicBlock *Pred = *PI;
3724     // To be usable, the pred has to end with an uncond branch to the dest.
3725     BranchInst *PredBr = dyn_cast<BranchInst>(Pred->getTerminator());
3726     if (!PredBr || !PredBr->isUnconditional() ||
3727         // Must be empty other than the branch.
3728         &Pred->front() != PredBr)
3729       continue;
3730
3731     // Finally, since we know that Dest has phi nodes in it, we have to make
3732     // sure that jumping to Pred will have the same effect as going to Dest in
3733     // terms of PHI values.
3734     PHINode *PN;
3735     unsigned PHINo = 0;
3736     bool FoundMatch = true;
3737     for (BasicBlock::iterator I = Dest->begin();
3738          (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo) {
3739       if (PHINo == TIPHIValues.size())
3740         TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB));
3741
3742       // If the PHI entry doesn't work, we can't use this pred.
3743       if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) {
3744         FoundMatch = false;
3745         break;
3746       }
3747     }
3748
3749     // If we found a workable predecessor, change TI to branch to Pred instead.
3750     if (FoundMatch) {
3751       Dest->removePredecessor(TIBB);
3752       TI->setSuccessor(SuccNum, Pred);
3753       return;
3754     }
3755   }
3756
3757   SplitCriticalEdge(TI, SuccNum, P, true);
3758 }
3759
3760
3761 bool SelectionDAGISel::runOnFunction(Function &Fn) {
3762   MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
3763   RegMap = MF.getSSARegMap();
3764   DOUT << "\n\n\n=== " << Fn.getName() << "\n";
3765
3766   // First, split all critical edges.
3767   //
3768   // In this pass we also look for GEP and cast instructions that are used
3769   // across basic blocks and rewrite them to improve basic-block-at-a-time
3770   // selection.
3771   //
3772   bool MadeChange = true;
3773   while (MadeChange) {
3774     MadeChange = false;
3775     for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
3776       // Split all critical edges where the dest block has a PHI.
3777       TerminatorInst *BBTI = BB->getTerminator();
3778       if (BBTI->getNumSuccessors() > 1) {
3779         for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i)
3780           if (isa<PHINode>(BBTI->getSuccessor(i)->begin()) &&
3781               isCriticalEdge(BBTI, i, true))
3782             SplitEdgeNicely(BBTI, i, this);
3783       }
3784
3785
3786       for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
3787         Instruction *I = BBI++;
3788
3789         if (CallInst *CI = dyn_cast<CallInst>(I)) {
3790           // If we found an inline asm expression, and if the target knows how to
3791           // lower it to normal LLVM code, do so now.
3792           if (isa<InlineAsm>(CI->getCalledValue()))
3793             if (const TargetAsmInfo *TAI =
3794                   TLI.getTargetMachine().getTargetAsmInfo()) {
3795               if (TAI->ExpandInlineAsm(CI))
3796                 BBI = BB->begin();
3797             }
3798         } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
3799           MadeChange |= OptimizeGEPExpression(GEPI, TLI.getTargetData());
3800         } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
3801           // If the source of the cast is a constant, then this should have
3802           // already been constant folded.  The only reason NOT to constant fold
3803           // it is if something (e.g. LSR) was careful to place the constant
3804           // evaluation in a block other than the one that uses it (e.g. to hoist
3805           // the address of globals out of a loop).  If this is the case, we don't
3806           // want to forward-subst the cast.
3807           if (isa<Constant>(CI->getOperand(0)))
3808             continue;
3809
3810           // If this is a noop copy, sink it into user blocks to reduce the number
3811           // of virtual registers that must be created and coalesced.
3812           MVT::ValueType SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
3813           MVT::ValueType DstVT = TLI.getValueType(CI->getType());
3814
3815           // If this is an fp<->int conversion, it is not a noop copy.
3816           if (MVT::isInteger(SrcVT) != MVT::isInteger(DstVT))
3817             continue;
3818
3819           // If this is an extension, it will be a zero or sign extension, which
3820           // isn't a noop.
3821           if (SrcVT < DstVT) continue;
3822
3823           // If these values will be promoted, find out what they will be promoted
3824           // to.  This helps us consider truncates on PPC as noop copies when they
3825           // are.
3826           if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
3827             SrcVT = TLI.getTypeToTransformTo(SrcVT);
3828           if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
3829             DstVT = TLI.getTypeToTransformTo(DstVT);
3830
3831           // If, after promotion, these are the same types, this is a noop copy.
3832           if (SrcVT == DstVT)
3833             MadeChange |= OptimizeNoopCopyExpression(CI);
3834         }
3835       }
3836     }
3837   }
3838
3839   FunctionLoweringInfo FuncInfo(TLI, Fn, MF);
3840
3841   for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
3842     SelectBasicBlock(I, MF, FuncInfo);
3843
3844   return true;
3845 }
3846
3847 SDOperand SelectionDAGLowering::CopyValueToVirtualRegister(Value *V,
3848                                                            unsigned Reg) {
3849   SDOperand Op = getValue(V);
3850   assert((Op.getOpcode() != ISD::CopyFromReg ||
3851           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
3852          "Copy from a reg to the same reg!");
3853
3854   // If this type is not legal, we must make sure to not create an invalid
3855   // register use.
3856   MVT::ValueType SrcVT = Op.getValueType();
3857   MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT);
3858   if (SrcVT == DestVT) {
3859     return DAG.getCopyToReg(getRoot(), Reg, Op);
3860   } else if (SrcVT == MVT::Vector) {
3861     // Handle copies from generic vectors to registers.
3862     MVT::ValueType PTyElementVT, PTyLegalElementVT;
3863     unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(V->getType()),
3864                                              PTyElementVT, PTyLegalElementVT);
3865
3866     // Insert a VBIT_CONVERT of the input vector to a "N x PTyElementVT"
3867     // MVT::Vector type.
3868     Op = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Op,
3869                      DAG.getConstant(NE, MVT::i32),
3870                      DAG.getValueType(PTyElementVT));
3871
3872     // Loop over all of the elements of the resultant vector,
3873     // VEXTRACT_VECTOR_ELT'ing them, converting them to PTyLegalElementVT, then
3874     // copying them into output registers.
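    // For example, with NE == 4 and a legal i32 element type this emits four
    // VEXTRACT_VECTOR_ELT + CopyToReg pairs targeting Reg..Reg+3; promoted or
    // expanded element types take the other branches below.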
3875     SmallVector<SDOperand, 8> OutChains;
3876     SDOperand Root = getRoot();
3877     for (unsigned i = 0; i != NE; ++i) {
3878       SDOperand Elt = DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, PTyElementVT,
3879                                   Op, DAG.getConstant(i, TLI.getPointerTy()));
3880       if (PTyElementVT == PTyLegalElementVT) {
3881         // Elements are legal.
3882         OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
3883       } else if (PTyLegalElementVT > PTyElementVT) {
3884         // Elements are promoted.
3885         if (MVT::isFloatingPoint(PTyLegalElementVT))
3886           Elt = DAG.getNode(ISD::FP_EXTEND, PTyLegalElementVT, Elt);
3887         else
3888           Elt = DAG.getNode(ISD::ANY_EXTEND, PTyLegalElementVT, Elt);
3889         OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
3890       } else {
3891         // Elements are expanded.
3892         // The src value is expanded into multiple registers.
3893         SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
3894                                    Elt, DAG.getConstant(0, TLI.getPointerTy()));
3895         SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
3896                                    Elt, DAG.getConstant(1, TLI.getPointerTy()));
3897         OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Lo));
3898         OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Hi));
3899       }
3900     }
3901     return DAG.getNode(ISD::TokenFactor, MVT::Other,
3902                        &OutChains[0], OutChains.size());
3903   } else if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote) {
3904     // The src value is promoted to the register.
3905     if (MVT::isFloatingPoint(SrcVT))
3906       Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op);
3907     else
3908       Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op);
3909     return DAG.getCopyToReg(getRoot(), Reg, Op);
3910   } else {
3911     DestVT = TLI.getTypeToExpandTo(SrcVT);
3912     unsigned NumVals = TLI.getNumElements(SrcVT);
3913     if (NumVals == 1)
3914       return DAG.getCopyToReg(getRoot(), Reg,
3915                               DAG.getNode(ISD::BIT_CONVERT, DestVT, Op));
3916     assert(NumVals == 2 && "1 to 4 (and more) expansion not implemented!");
3917     // The src value is expanded into multiple registers.
3918     SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
3919                                Op, DAG.getConstant(0, TLI.getPointerTy()));
3920     SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
3921                                Op, DAG.getConstant(1, TLI.getPointerTy()));
3922     Op = DAG.getCopyToReg(getRoot(), Reg, Lo);
3923     return DAG.getCopyToReg(Op, Reg+1, Hi);
3924   }
3925 }
3926
3927 void SelectionDAGISel::
3928 LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL,
3929                std::vector<SDOperand> &UnorderedChains) {
3930   // If this is the entry block, emit arguments.
3931   Function &F = *BB->getParent();
3932   FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
3933   SDOperand OldRoot = SDL.DAG.getRoot();
3934   std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);
3935
3936   unsigned a = 0;
3937   for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
3938        AI != E; ++AI, ++a)
3939     if (!AI->use_empty()) {
3940       SDL.setValue(AI, Args[a]);
3941
3942       // If this argument is live outside of the entry block, insert a copy from
3943       // wherever we got it to the vreg that other BB's will reference it as.
3944       if (FuncInfo.ValueMap.count(AI)) {
3945         SDOperand Copy =
3946           SDL.CopyValueToVirtualRegister(AI, FuncInfo.ValueMap[AI]);
3947         UnorderedChains.push_back(Copy);
3948       }
3949     }
3950
3951   // Finally, if the target has anything special to do, allow it to do so.
3952   // FIXME: this should insert code into the DAG!
3953 EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction()); 3954} 3955 3956void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB, 3957 std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate, 3958 FunctionLoweringInfo &FuncInfo) { 3959 SelectionDAGLowering SDL(DAG, TLI, FuncInfo); 3960 3961 std::vector<SDOperand> UnorderedChains; 3962 3963 // Lower any arguments needed in this block if this is the entry block. 3964 if (LLVMBB == &LLVMBB->getParent()->front()) 3965 LowerArguments(LLVMBB, SDL, UnorderedChains); 3966 3967 BB = FuncInfo.MBBMap[LLVMBB]; 3968 SDL.setCurrentBasicBlock(BB); 3969 3970 // Lower all of the non-terminator instructions. 3971 for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end(); 3972 I != E; ++I) 3973 SDL.visit(*I); 3974 3975 // Ensure that all instructions which are used outside of their defining 3976 // blocks are available as virtual registers. 3977 for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I) 3978 if (!I->use_empty() && !isa<PHINode>(I)) { 3979 std::map<const Value*, unsigned>::iterator VMI =FuncInfo.ValueMap.find(I); 3980 if (VMI != FuncInfo.ValueMap.end()) 3981 UnorderedChains.push_back( 3982 SDL.CopyValueToVirtualRegister(I, VMI->second)); 3983 } 3984 3985 // Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to 3986 // ensure constants are generated when needed. Remember the virtual registers 3987 // that need to be added to the Machine PHI nodes as input. We cannot just 3988 // directly add them, because expansion might result in multiple MBB's for one 3989 // BB. As such, the start of the BB might correspond to a different MBB than 3990 // the end. 3991 // 3992 TerminatorInst *TI = LLVMBB->getTerminator(); 3993 3994 // Emit constants only once even if used by multiple PHI nodes. 3995 std::map<Constant*, unsigned> ConstantsOut; 3996 3997 // Vector bool would be better, but vector<bool> is really slow. 3998 std::vector<unsigned char> SuccsHandled; 3999 if (TI->getNumSuccessors()) 4000 SuccsHandled.resize(BB->getParent()->getNumBlockIDs()); 4001 4002 // Check successor nodes PHI nodes that expect a constant to be available from 4003 // this block. 4004 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) { 4005 BasicBlock *SuccBB = TI->getSuccessor(succ); 4006 if (!isa<PHINode>(SuccBB->begin())) continue; 4007 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB]; 4008 4009 // If this terminator has multiple identical successors (common for 4010 // switches), only handle each succ once. 4011 unsigned SuccMBBNo = SuccMBB->getNumber(); 4012 if (SuccsHandled[SuccMBBNo]) continue; 4013 SuccsHandled[SuccMBBNo] = true; 4014 4015 MachineBasicBlock::iterator MBBI = SuccMBB->begin(); 4016 PHINode *PN; 4017 4018 // At this point we know that there is a 1-1 correspondence between LLVM PHI 4019 // nodes and Machine PHI nodes, but the incoming operands have not been 4020 // emitted yet. 4021 for (BasicBlock::iterator I = SuccBB->begin(); 4022 (PN = dyn_cast<PHINode>(I)); ++I) { 4023 // Ignore dead phi's. 
4024 if (PN->use_empty()) continue; 4025 4026 unsigned Reg; 4027 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB); 4028 4029 if (Constant *C = dyn_cast<Constant>(PHIOp)) { 4030 unsigned &RegOut = ConstantsOut[C]; 4031 if (RegOut == 0) { 4032 RegOut = FuncInfo.CreateRegForValue(C); 4033 UnorderedChains.push_back( 4034 SDL.CopyValueToVirtualRegister(C, RegOut)); 4035 } 4036 Reg = RegOut; 4037 } else { 4038 Reg = FuncInfo.ValueMap[PHIOp]; 4039 if (Reg == 0) { 4040 assert(isa<AllocaInst>(PHIOp) && 4041 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) && 4042 "Didn't codegen value into a register!??"); 4043 Reg = FuncInfo.CreateRegForValue(PHIOp); 4044 UnorderedChains.push_back( 4045 SDL.CopyValueToVirtualRegister(PHIOp, Reg)); 4046 } 4047 } 4048 4049 // Remember that this register needs to added to the machine PHI node as 4050 // the input for this MBB. 4051 MVT::ValueType VT = TLI.getValueType(PN->getType()); 4052 unsigned NumElements; 4053 if (VT != MVT::Vector) 4054 NumElements = TLI.getNumElements(VT); 4055 else { 4056 MVT::ValueType VT1,VT2; 4057 NumElements = 4058 TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()), 4059 VT1, VT2); 4060 } 4061 for (unsigned i = 0, e = NumElements; i != e; ++i) 4062 PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i)); 4063 } 4064 } 4065 ConstantsOut.clear(); 4066 4067 // Turn all of the unordered chains into one factored node. 4068 if (!UnorderedChains.empty()) { 4069 SDOperand Root = SDL.getRoot(); 4070 if (Root.getOpcode() != ISD::EntryToken) { 4071 unsigned i = 0, e = UnorderedChains.size(); 4072 for (; i != e; ++i) { 4073 assert(UnorderedChains[i].Val->getNumOperands() > 1); 4074 if (UnorderedChains[i].Val->getOperand(0) == Root) 4075 break; // Don't add the root if we already indirectly depend on it. 4076 } 4077 4078 if (i == e) 4079 UnorderedChains.push_back(Root); 4080 } 4081 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, 4082 &UnorderedChains[0], UnorderedChains.size())); 4083 } 4084 4085 // Lower the terminator after the copies are emitted. 4086 SDL.visit(*LLVMBB->getTerminator()); 4087 4088 // Copy over any CaseBlock records that may now exist due to SwitchInst 4089 // lowering, as well as any jump table information. 4090 SwitchCases.clear(); 4091 SwitchCases = SDL.SwitchCases; 4092 JT = SDL.JT; 4093 4094 // Make sure the root of the DAG is up-to-date. 4095 DAG.setRoot(SDL.getRoot()); 4096} 4097 4098void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) { 4099 // Get alias analysis for load/store combining. 4100 AliasAnalysis &AA = getAnalysis<AliasAnalysis>(); 4101 4102 // Run the DAG combiner in pre-legalize mode. 4103 DAG.Combine(false, AA); 4104 4105 DOUT << "Lowered selection DAG:\n"; 4106 DEBUG(DAG.dump()); 4107 4108 // Second step, hack on the DAG until it only uses operations and types that 4109 // the target supports. 4110 DAG.Legalize(); 4111 4112 DOUT << "Legalized selection DAG:\n"; 4113 DEBUG(DAG.dump()); 4114 4115 // Run the DAG combiner in post-legalize mode. 4116 DAG.Combine(true, AA); 4117 4118 if (ViewISelDAGs) DAG.viewGraph(); 4119 4120 // Third, instruction select all of the operations to machine code, adding the 4121 // code to the MachineBasicBlock. 
4122 InstructionSelectBasicBlock(DAG); 4123 4124 DOUT << "Selected machine code:\n"; 4125 DEBUG(BB->dump()); 4126} 4127 4128void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF, 4129 FunctionLoweringInfo &FuncInfo) { 4130 std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate; 4131 { 4132 SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>()); 4133 CurDAG = &DAG; 4134 4135 // First step, lower LLVM code to some DAG. This DAG may use operations and 4136 // types that are not supported by the target. 4137 BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo); 4138 4139 // Second step, emit the lowered DAG as machine code. 4140 CodeGenAndEmitDAG(DAG); 4141 } 4142 4143 // Next, now that we know what the last MBB the LLVM BB expanded is, update 4144 // PHI nodes in successors. 4145 if (SwitchCases.empty() && JT.Reg == 0) { 4146 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) { 4147 MachineInstr *PHI = PHINodesToUpdate[i].first; 4148 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 4149 "This is not a machine PHI node that we are updating!"); 4150 PHI->addRegOperand(PHINodesToUpdate[i].second, false); 4151 PHI->addMachineBasicBlockOperand(BB); 4152 } 4153 return; 4154 } 4155 4156 // If the JumpTable record is filled in, then we need to emit a jump table. 4157 // Updating the PHI nodes is tricky in this case, since we need to determine 4158 // whether the PHI is a successor of the range check MBB or the jump table MBB 4159 if (JT.Reg) { 4160 assert(SwitchCases.empty() && "Cannot have jump table and lowered switch"); 4161 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>()); 4162 CurDAG = &SDAG; 4163 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo); 4164 MachineBasicBlock *RangeBB = BB; 4165 // Set the current basic block to the mbb we wish to insert the code into 4166 BB = JT.MBB; 4167 SDL.setCurrentBasicBlock(BB); 4168 // Emit the code 4169 SDL.visitJumpTable(JT); 4170 SDAG.setRoot(SDL.getRoot()); 4171 CodeGenAndEmitDAG(SDAG); 4172 // Update PHI Nodes 4173 for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) { 4174 MachineInstr *PHI = PHINodesToUpdate[pi].first; 4175 MachineBasicBlock *PHIBB = PHI->getParent(); 4176 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 4177 "This is not a machine PHI node that we are updating!"); 4178 if (PHIBB == JT.Default) { 4179 PHI->addRegOperand(PHINodesToUpdate[pi].second, false); 4180 PHI->addMachineBasicBlockOperand(RangeBB); 4181 } 4182 if (BB->succ_end() != std::find(BB->succ_begin(),BB->succ_end(), PHIBB)) { 4183 PHI->addRegOperand(PHINodesToUpdate[pi].second, false); 4184 PHI->addMachineBasicBlockOperand(BB); 4185 } 4186 } 4187 return; 4188 } 4189 4190 // If the switch block involved a branch to one of the actual successors, we 4191 // need to update PHI nodes in that block. 4192 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) { 4193 MachineInstr *PHI = PHINodesToUpdate[i].first; 4194 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 4195 "This is not a machine PHI node that we are updating!"); 4196 if (BB->isSuccessor(PHI->getParent())) { 4197 PHI->addRegOperand(PHINodesToUpdate[i].second, false); 4198 PHI->addMachineBasicBlockOperand(BB); 4199 } 4200 } 4201 4202 // If we generated any switch lowering information, build and codegen any 4203 // additional DAGs necessary. 
4204 for (unsigned i = 0, e = SwitchCases.size(); i != e; ++i) { 4205 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>()); 4206 CurDAG = &SDAG; 4207 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo); 4208 4209 // Set the current basic block to the mbb we wish to insert the code into 4210 BB = SwitchCases[i].ThisBB; 4211 SDL.setCurrentBasicBlock(BB); 4212 4213 // Emit the code 4214 SDL.visitSwitchCase(SwitchCases[i]); 4215 SDAG.setRoot(SDL.getRoot()); 4216 CodeGenAndEmitDAG(SDAG); 4217 4218 // Handle any PHI nodes in successors of this chunk, as if we were coming 4219 // from the original BB before switch expansion. Note that PHI nodes can 4220 // occur multiple times in PHINodesToUpdate. We have to be very careful to 4221 // handle them the right number of times. 4222 while ((BB = SwitchCases[i].TrueBB)) { // Handle LHS and RHS. 4223 for (MachineBasicBlock::iterator Phi = BB->begin(); 4224 Phi != BB->end() && Phi->getOpcode() == TargetInstrInfo::PHI; ++Phi){ 4225 // This value for this PHI node is recorded in PHINodesToUpdate, get it. 4226 for (unsigned pn = 0; ; ++pn) { 4227 assert(pn != PHINodesToUpdate.size() && "Didn't find PHI entry!"); 4228 if (PHINodesToUpdate[pn].first == Phi) { 4229 Phi->addRegOperand(PHINodesToUpdate[pn].second, false); 4230 Phi->addMachineBasicBlockOperand(SwitchCases[i].ThisBB); 4231 break; 4232 } 4233 } 4234 } 4235 4236 // Don't process RHS if same block as LHS. 4237 if (BB == SwitchCases[i].FalseBB) 4238 SwitchCases[i].FalseBB = 0; 4239 4240 // If we haven't handled the RHS, do so now. Otherwise, we're done. 4241 SwitchCases[i].TrueBB = SwitchCases[i].FalseBB; 4242 SwitchCases[i].FalseBB = 0; 4243 } 4244 assert(SwitchCases[i].TrueBB == 0 && SwitchCases[i].FalseBB == 0); 4245 } 4246} 4247 4248 4249//===----------------------------------------------------------------------===// 4250/// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each 4251/// target node in the graph. 4252void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) { 4253 if (ViewSchedDAGs) DAG.viewGraph(); 4254 4255 RegisterScheduler::FunctionPassCtor Ctor = RegisterScheduler::getDefault(); 4256 4257 if (!Ctor) { 4258 Ctor = ISHeuristic; 4259 RegisterScheduler::setDefault(Ctor); 4260 } 4261 4262 ScheduleDAG *SL = Ctor(this, &DAG, BB); 4263 BB = SL->Run(); 4264 delete SL; 4265} 4266 4267 4268HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() { 4269 return new HazardRecognizer(); 4270} 4271 4272//===----------------------------------------------------------------------===// 4273// Helper functions used by the generated instruction selector. 4274//===----------------------------------------------------------------------===// 4275// Calls to these methods are generated by tblgen. 4276 4277/// CheckAndMask - The isel is trying to match something like (and X, 255). If 4278/// the dag combiner simplified the 255, we still want to match. RHS is the 4279/// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value 4280/// specified in the .td file (e.g. 255). 4281bool SelectionDAGISel::CheckAndMask(SDOperand LHS, ConstantSDNode *RHS, 4282 int64_t DesiredMaskS) { 4283 uint64_t ActualMask = RHS->getValue(); 4284 uint64_t DesiredMask =DesiredMaskS & MVT::getIntVTBitMask(LHS.getValueType()); 4285 4286 // If the actual mask exactly matches, success! 4287 if (ActualMask == DesiredMask) 4288 return true; 4289 4290 // If the actual AND mask is allowing unallowed bits, this doesn't match. 
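  // (Worked example: if the pattern wants mask 0xFF but the combiner shrank
  // the AND to 0x0F because the upper bits of LHS are known zero, ActualMask
  // is 0x0F and passes the check below; NeededMask is then 0xF0 and the
  // MaskedValueIsZero query lets the pattern match anyway.)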
4291 if (ActualMask & ~DesiredMask) 4292 return false; 4293 4294 // Otherwise, the DAG Combiner may have proven that the value coming in is 4295 // either already zero or is not demanded. Check for known zero input bits. 4296 uint64_t NeededMask = DesiredMask & ~ActualMask; 4297 if (getTargetLowering().MaskedValueIsZero(LHS, NeededMask)) 4298 return true; 4299 4300 // TODO: check to see if missing bits are just not demanded. 4301 4302 // Otherwise, this pattern doesn't match. 4303 return false; 4304} 4305 4306/// CheckOrMask - The isel is trying to match something like (or X, 255). If 4307/// the dag combiner simplified the 255, we still want to match. RHS is the 4308/// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value 4309/// specified in the .td file (e.g. 255). 4310bool SelectionDAGISel::CheckOrMask(SDOperand LHS, ConstantSDNode *RHS, 4311 int64_t DesiredMaskS) { 4312 uint64_t ActualMask = RHS->getValue(); 4313 uint64_t DesiredMask =DesiredMaskS & MVT::getIntVTBitMask(LHS.getValueType()); 4314 4315 // If the actual mask exactly matches, success! 4316 if (ActualMask == DesiredMask) 4317 return true; 4318 4319 // If the actual AND mask is allowing unallowed bits, this doesn't match. 4320 if (ActualMask & ~DesiredMask) 4321 return false; 4322 4323 // Otherwise, the DAG Combiner may have proven that the value coming in is 4324 // either already zero or is not demanded. Check for known zero input bits. 4325 uint64_t NeededMask = DesiredMask & ~ActualMask; 4326 4327 uint64_t KnownZero, KnownOne; 4328 getTargetLowering().ComputeMaskedBits(LHS, NeededMask, KnownZero, KnownOne); 4329 4330 // If all the missing bits in the or are already known to be set, match! 4331 if ((NeededMask & KnownOne) == NeededMask) 4332 return true; 4333 4334 // TODO: check to see if missing bits are just not demanded. 4335 4336 // Otherwise, this pattern doesn't match. 4337 return false; 4338} 4339 4340 4341/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated 4342/// by tblgen. Others should not call it. 4343void SelectionDAGISel:: 4344SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) { 4345 std::vector<SDOperand> InOps; 4346 std::swap(InOps, Ops); 4347 4348 Ops.push_back(InOps[0]); // input chain. 4349 Ops.push_back(InOps[1]); // input asm string. 4350 4351 unsigned i = 2, e = InOps.size(); 4352 if (InOps[e-1].getValueType() == MVT::Flag) 4353 --e; // Don't process a flag operand if it is here. 4354 4355 while (i != e) { 4356 unsigned Flags = cast<ConstantSDNode>(InOps[i])->getValue(); 4357 if ((Flags & 7) != 4 /*MEM*/) { 4358 // Just skip over this operand, copying the operands verbatim. 4359 Ops.insert(Ops.end(), InOps.begin()+i, InOps.begin()+i+(Flags >> 3) + 1); 4360 i += (Flags >> 3) + 1; 4361 } else { 4362 assert((Flags >> 3) == 1 && "Memory operand with multiple values?"); 4363 // Otherwise, this is a memory operand. Ask the target to select it. 4364 std::vector<SDOperand> SelOps; 4365 if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) { 4366 cerr << "Could not match memory address. Inline asm failure!\n"; 4367 exit(1); 4368 } 4369 4370 // Add this to the output node. 4371 Ops.push_back(DAG.getTargetConstant(4/*MEM*/ | (SelOps.size() << 3), 4372 MVT::i32)); 4373 Ops.insert(Ops.end(), SelOps.begin(), SelOps.end()); 4374 i += 2; 4375 } 4376 } 4377 4378 // Add the flag input back if present. 4379 if (e != InOps.size()) 4380 Ops.push_back(InOps.back()); 4381} 4382