SelectionDAGISel.cpp revision 7e598096ea8db3f19f4ec8f4cb407aea996bd7c2
//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineDebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include <map>
#include <set>
#include <iostream>
#include <algorithm>
using namespace llvm;

#ifndef NDEBUG
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
             cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
              cl::desc("Pop up a window to show sched dags as they are processed"));
#else
static const bool ViewISelDAGs = 0, ViewSchedDAGs = 0;
#endif

// Scheduling heuristics
enum SchedHeuristics {
  defaultScheduling,      // Let the target specify its preference.
  noScheduling,           // No scheduling, emit breadth first sequence.
  simpleScheduling,       // Two pass, min. critical path, max. utilization.
  simpleNoItinScheduling, // Same as above except using generic latency.
  listSchedulingBURR,     // Bottom up reg reduction list scheduling.
  listSchedulingTD        // Top-down list scheduler.
};

namespace {
  cl::opt<SchedHeuristics>
  ISHeuristic(
    "sched",
    cl::desc("Choose scheduling style"),
    cl::init(defaultScheduling),
    cl::values(
      clEnumValN(defaultScheduling, "default",
                 "Target preferred scheduling style"),
      clEnumValN(noScheduling, "none",
                 "No scheduling: breadth first sequencing"),
      clEnumValN(simpleScheduling, "simple",
                 "Simple two pass scheduling: minimize critical path "
                 "and maximize processor utilization"),
      clEnumValN(simpleNoItinScheduling, "simple-noitin",
                 "Simple two pass scheduling: Same as simple "
                 "except using generic latency"),
      clEnumValN(listSchedulingBURR, "list-burr",
                 "Bottom up register reduction list scheduling"),
      clEnumValN(listSchedulingTD, "list-td",
                 "Top-down list scheduler"),
      clEnumValEnd));
} // namespace

namespace {
  /// RegsForValue - This struct represents the physical registers that a
  /// particular value is assigned and the type information about the value.
  /// This is needed because values can be promoted into larger registers and
  /// expanded into multiple smaller registers than the value.
  struct RegsForValue {
    /// Regs - This list holds the register (for legal and promoted values)
    /// or register set (for expanded values) that the value should be assigned
    /// to.
    std::vector<unsigned> Regs;

    /// RegVT - The value type of each register.
    ///
    MVT::ValueType RegVT;

    /// ValueVT - The value type of the LLVM value, which may be promoted from
    /// RegVT or made from merging the two expanded parts.
    MVT::ValueType ValueVT;

    RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {}

    RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt)
      : RegVT(regvt), ValueVT(valuevt) {
      Regs.push_back(Reg);
    }
    RegsForValue(const std::vector<unsigned> &regs,
                 MVT::ValueType regvt, MVT::ValueType valuevt)
      : Regs(regs), RegVT(regvt), ValueVT(valuevt) {
    }

    /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
    /// this value and returns the result as a ValueVT value. This uses
    /// Chain/Flag as the input and updates them for the output Chain/Flag.
    SDOperand getCopyFromRegs(SelectionDAG &DAG,
                              SDOperand &Chain, SDOperand &Flag) const;

    /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
    /// specified value into the registers specified by this object. This uses
    /// Chain/Flag as the input and updates them for the output Chain/Flag.
    void getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
                       SDOperand &Chain, SDOperand &Flag) const;

    /// AddInlineAsmOperands - Add this value to the specified inlineasm node
    /// operand list. This adds the code marker and includes the number of
    /// values added into it.
    void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
                              std::vector<SDOperand> &Ops) const;
  };
}

namespace llvm {
  //===--------------------------------------------------------------------===//
  /// FunctionLoweringInfo - This contains information that is global to a
  /// function that is used when lowering a region of the function.
  class FunctionLoweringInfo {
  public:
    TargetLowering &TLI;
    Function &Fn;
    MachineFunction &MF;
    SSARegMap *RegMap;

    FunctionLoweringInfo(TargetLowering &TLI, Function &Fn,MachineFunction &MF);

    /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
    std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;

    /// ValueMap - Since we emit code for the function a basic block at a time,
    /// we must remember which virtual registers hold the values for
    /// cross-basic-block values.
    std::map<const Value*, unsigned> ValueMap;

    /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
    /// the entry block. This allows the allocas to be efficiently referenced
    /// anywhere in the function.
    std::map<const AllocaInst*, int> StaticAllocaMap;

    unsigned MakeReg(MVT::ValueType VT) {
      return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
    }

    unsigned CreateRegForValue(const Value *V);

    unsigned InitializeRegForValue(const Value *V) {
      unsigned &R = ValueMap[V];
      assert(R == 0 && "Already initialized this value register!");
      return R = CreateRegForValue(V);
    }
  };
}

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) ||
        isa<SwitchInst>(*UI))
      return true;
  return false;
}

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(Argument *A) {
  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
      return false;  // Use not in entry block.
  return true;
}

FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
                                           Function &fn, MachineFunction &mf)
  : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::iterator BB = Fn.begin(), EB = Fn.end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
                   AI->getAlignment());

        // If the alignment of the value is smaller than the size of the value,
        // and if the size of the value is particularly small (<= 8 bytes),
        // round up to the size of the value for potentially better performance.
        //
        // FIXME: This could be made better with a preferred alignment hook in
        // TargetData. It serves primarily to 8-byte align doubles for X86.
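        // (For example, a double has TySize == 8; if the target only gives it
        //  4-byte natural alignment, Align is bumped to 8 here.)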
        if (Align < TySize && TySize <= 8) Align = TySize;
        TySize *= CUI->getValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
      }

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = new MachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF.getBasicBlockList().push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    for (BasicBlock::iterator I = BB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        MVT::ValueType VT = TLI.getValueType(PN->getType());
        unsigned NumElements;
        if (VT != MVT::Vector)
          NumElements = TLI.getNumElements(VT);
        else {
          MVT::ValueType VT1,VT2;
          NumElements =
            TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
                                       VT1, VT2);
        }
        unsigned PHIReg = ValueMap[PN];
        assert(PHIReg && "PHI node does not have an assigned virtual register!");
        for (unsigned i = 0; i != NumElements; ++i)
          BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);
      }
  }
}

/// CreateRegForValue - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
  MVT::ValueType VT = TLI.getValueType(V->getType());

  // The number of multiples of registers that we need to, e.g., split up
  // a <2 x int64> -> 4 x i32 registers.
  unsigned NumVectorRegs = 1;

  // If this is a packed type, figure out what type it will decompose into
  // and how many of the elements it will use.
  if (VT == MVT::Vector) {
    const PackedType *PTy = cast<PackedType>(V->getType());
    unsigned NumElts = PTy->getNumElements();
    MVT::ValueType EltTy = TLI.getValueType(PTy->getElementType());

    // Divide the input until we get to a supported size. This will always
    // end with a scalar if the target doesn't support vectors.
    while (NumElts > 1 && !TLI.isTypeLegal(getVectorType(EltTy, NumElts))) {
      NumElts >>= 1;
      NumVectorRegs <<= 1;
    }
    if (NumElts == 1)
      VT = EltTy;
    else
      VT = getVectorType(EltTy, NumElts);
  }

  // The common case is that we will only create one register for this
  // value. If we have that case, create and return the virtual register.
  unsigned NV = TLI.getNumElements(VT);
  if (NV == 1) {
    // If we are promoting this value, pick the next largest supported type.
    MVT::ValueType PromotedType = TLI.getTypeToTransformTo(VT);
    unsigned Reg = MakeReg(PromotedType);
    // If this is a vector of supported or promoted types (e.g. 4 x i16),
    // create all of the registers.
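    // (For instance, a <4 x i16> value on a target with no legal vector types
    //  decomposes to 4 scalar elements above; if i16 is promoted to i32 on
    //  that target, this loop allocates 4 consecutive i32 virtual registers.)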
    for (unsigned i = 1; i != NumVectorRegs; ++i)
      MakeReg(PromotedType);
    return Reg;
  }

  // If this value is represented with multiple target registers, make sure
  // to create enough consecutive registers of the right (smaller) type.
  unsigned NT = VT-1;  // Find the type to use.
  while (TLI.getNumElements((MVT::ValueType)NT) != 1)
    --NT;

  unsigned R = MakeReg((MVT::ValueType)NT);
  for (unsigned i = 1; i != NV*NumVectorRegs; ++i)
    MakeReg((MVT::ValueType)NT);
  return R;
}

//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
/// Also, targets can overload any lowering method.
///
namespace llvm {
class SelectionDAGLowering {
  MachineBasicBlock *CurMBB;

  std::map<const Value*, SDOperand> NodeMap;

  /// PendingLoads - Loads are not emitted to the program immediately. We bunch
  /// them up and then emit token factor nodes when possible. This allows us to
  /// get simple disambiguation between loads without worrying about alias
  /// analysis.
  std::vector<SDOperand> PendingLoads;

  /// Case - A pair of values to record the Value for a switch case, and the
  /// case's target basic block.
  typedef std::pair<Constant*, MachineBasicBlock*> Case;
  typedef std::vector<Case>::iterator CaseItr;
  typedef std::pair<CaseItr, CaseItr> CaseRange;

  /// CaseRec - A struct with ctor used in lowering switches to a binary tree
  /// of conditional branches.
  struct CaseRec {
    CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) :
      CaseBB(bb), LT(lt), GE(ge), Range(r) {}

    /// CaseBB - The MBB in which to emit the compare and branch
    MachineBasicBlock *CaseBB;
    /// LT, GE - If nonzero, we know the current case value must be less-than or
    /// greater-than-or-equal-to these Constants.
    Constant *LT;
    Constant *GE;
    /// Range - A pair of iterators representing the range of case values to be
    /// processed at this point in the binary search tree.
    CaseRange Range;
  };

  /// The comparison function for sorting Case values.
  struct CaseCmp {
    bool operator () (const Case& C1, const Case& C2) {
      if (const ConstantUInt* U1 = dyn_cast<const ConstantUInt>(C1.first))
        return U1->getValue() < cast<const ConstantUInt>(C2.first)->getValue();

      const ConstantSInt* S1 = dyn_cast<const ConstantSInt>(C1.first);
      return S1->getValue() < cast<const ConstantSInt>(C2.first)->getValue();
    }
  };

public:
  // TLI - This is information that describes the available target features we
  // need for lowering. This indicates when operations are unavailable,
  // implemented with a libcall, etc.
  TargetLowering &TLI;
  SelectionDAG &DAG;
  const TargetData *TD;

  /// SwitchCases - Vector of CaseBlock structures used to communicate
  /// SwitchInst code generation information.
  std::vector<SelectionDAGISel::CaseBlock> SwitchCases;
  SelectionDAGISel::JumpTable JT;

  /// FuncInfo - Information about the function as a whole.
  ///
  FunctionLoweringInfo &FuncInfo;

  SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
                       FunctionLoweringInfo &funcinfo)
    : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
      JT(0,0,0,0), FuncInfo(funcinfo) {
  }

  /// getRoot - Return the current virtual root of the Selection DAG.
  ///
  SDOperand getRoot() {
    if (PendingLoads.empty())
      return DAG.getRoot();

    if (PendingLoads.size() == 1) {
      SDOperand Root = PendingLoads[0];
      DAG.setRoot(Root);
      PendingLoads.clear();
      return Root;
    }

    // Otherwise, we have to make a token factor node.
    SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other, PendingLoads);
    PendingLoads.clear();
    DAG.setRoot(Root);
    return Root;
  }

  void visit(Instruction &I) { visit(I.getOpcode(), I); }

  void visit(unsigned Opcode, User &I) {
    switch (Opcode) {
    default: assert(0 && "Unknown instruction type encountered!");
             abort();
      // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
    }
  }

  void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }

  SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr,
                        SDOperand SrcValue, SDOperand Root,
                        bool isVolatile);

  SDOperand getIntPtrConstant(uint64_t Val) {
    return DAG.getConstant(Val, TLI.getPointerTy());
  }

  SDOperand getValue(const Value *V);

  const SDOperand &setValue(const Value *V, SDOperand NewN) {
    SDOperand &N = NodeMap[V];
    assert(N.Val == 0 && "Already set a value for this node!");
    return N = NewN;
  }

  RegsForValue GetRegistersForValue(const std::string &ConstrCode,
                                    MVT::ValueType VT,
                                    bool OutReg, bool InReg,
                                    std::set<unsigned> &OutputRegs,
                                    std::set<unsigned> &InputRegs);

  // Terminator instructions.
  void visitRet(ReturnInst &I);
  void visitBr(BranchInst &I);
  void visitSwitch(SwitchInst &I);
  void visitUnreachable(UnreachableInst &I) { /* noop */ }

  // Helper for visitSwitch
  void visitSwitchCase(SelectionDAGISel::CaseBlock &CB);
  void visitJumpTable(SelectionDAGISel::JumpTable &JT);

  // These all get lowered before this pass.
  void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
  void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }

  void visitBinary(User &I, unsigned IntOp, unsigned FPOp, unsigned VecOp);
  void visitShift(User &I, unsigned Opcode);
  void visitAdd(User &I) {
    visitBinary(I, ISD::ADD, ISD::FADD, ISD::VADD);
  }
  void visitSub(User &I);
  void visitMul(User &I) {
    visitBinary(I, ISD::MUL, ISD::FMUL, ISD::VMUL);
  }
  void visitDiv(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I,
                Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV,
                Ty->isSigned() ? ISD::VSDIV : ISD::VUDIV);
  }
  void visitRem(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, ISD::FREM, 0);
  }
  void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, ISD::VAND); }
  void visitOr (User &I) { visitBinary(I, ISD::OR,  0, ISD::VOR); }
  void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, ISD::VXOR); }
  void visitShl(User &I) { visitShift(I, ISD::SHL); }
  void visitShr(User &I) {
    visitShift(I, I.getType()->isUnsigned() ?
                          ISD::SRL : ISD::SRA);
  }

  void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc);
  void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ); }
  void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE); }
  void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE); }
  void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE); }
  void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT); }
  void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT); }

  void visitExtractElement(User &I);
  void visitInsertElement(User &I);
  void visitShuffleVector(User &I);

  void visitGetElementPtr(User &I);
  void visitCast(User &I);
  void visitSelect(User &I);

  void visitMalloc(MallocInst &I);
  void visitFree(FreeInst &I);
  void visitAlloca(AllocaInst &I);
  void visitLoad(LoadInst &I);
  void visitStore(StoreInst &I);
  void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
  void visitCall(CallInst &I);
  void visitInlineAsm(CallInst &I);
  const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic);

  void visitVAStart(CallInst &I);
  void visitVAArg(VAArgInst &I);
  void visitVAEnd(CallInst &I);
  void visitVACopy(CallInst &I);
  void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);

  void visitMemIntrinsic(CallInst &I, unsigned Op);

  void visitUserOp1(Instruction &I) {
    assert(0 && "UserOp1 should not exist at instruction selection time!");
    abort();
  }
  void visitUserOp2(Instruction &I) {
    assert(0 && "UserOp2 should not exist at instruction selection time!");
    abort();
  }
};
} // end namespace llvm

SDOperand SelectionDAGLowering::getValue(const Value *V) {
  SDOperand &N = NodeMap[V];
  if (N.Val) return N;

  const Type *VTy = V->getType();
  MVT::ValueType VT = TLI.getValueType(VTy);
  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      assert(N.Val && "visit didn't populate the ValueMap!");
      return N;
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
      return N = DAG.getGlobalAddress(GV, VT);
    } else if (isa<ConstantPointerNull>(C)) {
      return N = DAG.getConstant(0, TLI.getPointerTy());
    } else if (isa<UndefValue>(C)) {
      if (!isa<PackedType>(VTy))
        return N = DAG.getNode(ISD::UNDEF, VT);

      // Create a VBUILD_VECTOR of undef nodes.
      const PackedType *PTy = cast<PackedType>(VTy);
      unsigned NumElements = PTy->getNumElements();
      MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());

      std::vector<SDOperand> Ops;
      Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT));

      // Create a VBUILD_VECTOR node with generic Vector type.
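      // (The operand list is the element values, followed by the element
      //  count as an i32 and the element value type.)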
      Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
      Ops.push_back(DAG.getValueType(PVT));
      return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
    } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
      return N = DAG.getConstantFP(CFP->getValue(), VT);
    } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
      unsigned NumElements = PTy->getNumElements();
      MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());

      // Now that we know the number and type of the elements, push a
      // Constant or ConstantFP node onto the ops list for each element of
      // the packed constant.
      std::vector<SDOperand> Ops;
      if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
        for (unsigned i = 0; i != NumElements; ++i)
          Ops.push_back(getValue(CP->getOperand(i)));
      } else {
        assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
        SDOperand Op;
        if (MVT::isFloatingPoint(PVT))
          Op = DAG.getConstantFP(0, PVT);
        else
          Op = DAG.getConstant(0, PVT);
        Ops.assign(NumElements, Op);
      }

      // Create a VBUILD_VECTOR node with generic Vector type.
      Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
      Ops.push_back(DAG.getValueType(PVT));
      return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
    } else {
      // Canonicalize all constant ints to be unsigned.
      return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT);
    }
  }

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    std::map<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
  }

  std::map<const Value*, unsigned>::const_iterator VMI =
    FuncInfo.ValueMap.find(V);
  assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");

  unsigned InReg = VMI->second;

  // If this type is not legal, make it so now.
  if (VT != MVT::Vector) {
    MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);

    N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
    if (DestVT < VT) {
      // Source must be expanded. This input value is actually coming from the
      // register pair VMI->second and VMI->second+1.
      N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
                      DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
    } else if (DestVT > VT) { // Promotion case
      if (MVT::isFloatingPoint(VT))
        N = DAG.getNode(ISD::FP_ROUND, VT, N);
      else
        N = DAG.getNode(ISD::TRUNCATE, VT, N);
    }
  } else {
    // Otherwise, if this is a vector, make it available as a generic vector
    // here.
    MVT::ValueType PTyElementVT, PTyLegalElementVT;
    const PackedType *PTy = cast<PackedType>(VTy);
    unsigned NE = TLI.getPackedTypeBreakdown(PTy, PTyElementVT,
                                             PTyLegalElementVT);

    // Build a VBUILD_VECTOR with the input registers.
    std::vector<SDOperand> Ops;
    if (PTyElementVT == PTyLegalElementVT) {
      // If the value types are legal, just VBUILD the CopyFromReg nodes.
      for (unsigned i = 0; i != NE; ++i)
        Ops.push_back(DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                         PTyElementVT));
    } else if (PTyElementVT < PTyLegalElementVT) {
      // If the register was promoted, use TRUNCATE or FP_ROUND as appropriate.
      for (unsigned i = 0; i != NE; ++i) {
        SDOperand Op = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                          PTyElementVT);
        if (MVT::isFloatingPoint(PTyElementVT))
          Op = DAG.getNode(ISD::FP_ROUND, PTyElementVT, Op);
        else
          Op = DAG.getNode(ISD::TRUNCATE, PTyElementVT, Op);
        Ops.push_back(Op);
      }
    } else {
      // If the register was expanded, use BUILD_PAIR.
      assert((NE & 1) == 0 && "Must expand into a multiple of 2 elements!");
      for (unsigned i = 0; i != NE/2; ++i) {
        SDOperand Op0 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                           PTyElementVT);
        SDOperand Op1 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                           PTyElementVT);
        Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Op0, Op1));
      }
    }

    Ops.push_back(DAG.getConstant(NE, MVT::i32));
    Ops.push_back(DAG.getValueType(PTyLegalElementVT));
    N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);

    // Finally, use a VBIT_CONVERT to make this available as the appropriate
    // vector type.
    N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
                    DAG.getConstant(PTy->getNumElements(),
                                    MVT::i32),
                    DAG.getValueType(TLI.getValueType(PTy->getElementType())));
  }

  return N;
}


void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
    return;
  }
  std::vector<SDOperand> NewValues;
  NewValues.push_back(getRoot());
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    SDOperand RetOp = getValue(I.getOperand(i));

    // If this is an integer return value, we need to promote it ourselves to
    // the full width of a register, since LegalizeOp will use ANY_EXTEND rather
    // than sign/zero.
    if (MVT::isInteger(RetOp.getValueType()) &&
        RetOp.getValueType() < MVT::i64) {
      MVT::ValueType TmpVT;
      if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
        TmpVT = TLI.getTypeToTransformTo(MVT::i32);
      else
        TmpVT = MVT::i32;

      if (I.getOperand(i)->getType()->isSigned())
        RetOp = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, RetOp);
      else
        RetOp = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, RetOp);
    }
    NewValues.push_back(RetOp);
  }
  DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, NewValues));
}

void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
  CurMBB->addSuccessor(Succ0MBB);

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(Succ0MBB)));
  } else {
    MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
    CurMBB->addSuccessor(Succ1MBB);

    SDOperand Cond = getValue(I.getCondition());
    if (Succ1MBB == NextBlock) {
      // If the condition is false, fall through. This means we should branch
      // if the condition is true to Succ #0.
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
                              Cond, DAG.getBasicBlock(Succ0MBB)));
    } else if (Succ0MBB == NextBlock) {
      // If the condition is true, fall through. This means we should branch if
      // the condition is false to Succ #1.
      // Invert the condition first.
      SDOperand True = DAG.getConstant(1, Cond.getValueType());
      Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
                              Cond, DAG.getBasicBlock(Succ1MBB)));
    } else {
      std::vector<SDOperand> Ops;
      Ops.push_back(getRoot());
      // If the false case is the current basic block, then this is a self
      // loop. We do not want to emit "Loop: ... brcond Out; br Loop", as it
      // adds an extra instruction in the loop. Instead, invert the
      // condition and emit "Loop: ... br!cond Loop; br Out".
      if (CurMBB == Succ1MBB) {
        std::swap(Succ0MBB, Succ1MBB);
        SDOperand True = DAG.getConstant(1, Cond.getValueType());
        Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
      }
      SDOperand True = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
                                   DAG.getBasicBlock(Succ0MBB));
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, True,
                              DAG.getBasicBlock(Succ1MBB)));
    }
  }
}

/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) {
  SDOperand SwitchOp = getValue(CB.SwitchV);
  SDOperand CaseOp = getValue(CB.CaseC);
  SDOperand Cond = DAG.getSetCC(MVT::i1, SwitchOp, CaseOp, CB.CC);

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.LHSBB == NextBlock) {
    std::swap(CB.LHSBB, CB.RHSBB);
    SDOperand True = DAG.getConstant(1, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
  }
  SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
                                 DAG.getBasicBlock(CB.LHSBB));
  if (CB.RHSBB == NextBlock)
    DAG.setRoot(BrCond);
  else
    DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond,
                            DAG.getBasicBlock(CB.RHSBB)));
  // Update successor info
  CurMBB->addSuccessor(CB.LHSBB);
  CurMBB->addSuccessor(CB.RHSBB);
}

/// visitJumpTable - Emits the necessary code to load the destination address
/// out of the jump table and branch to it, for a switch that was lowered to a
/// jump table.
void SelectionDAGLowering::visitJumpTable(SelectionDAGISel::JumpTable &JT) {
  // FIXME: Need to emit different code for PIC vs. Non-PIC, specifically,
  // we need to add the address of the jump table to the value loaded, since
  // the entries in the jump table will be differences rather than absolute
  // addresses.
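  //
  // Each jump table entry holds the absolute address of a destination block,
  // so the sequence below loads *(JumpTableBase + index * PointerSize) and
  // branches indirectly to it.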

  // Emit the code for the jump table
  MVT::ValueType PTy = TLI.getPointerTy();
  unsigned PTyBytes = MVT::getSizeInBits(PTy)/8;
  SDOperand Copy = DAG.getCopyFromReg(getRoot(), JT.Reg, PTy);
  SDOperand IDX = DAG.getNode(ISD::MUL, PTy, Copy,
                              DAG.getConstant(PTyBytes, PTy));
  SDOperand ADD = DAG.getNode(ISD::ADD, PTy, IDX, DAG.getJumpTable(JT.JTI,PTy));
  SDOperand LD = DAG.getLoad(PTy, Copy.getValue(1), ADD, DAG.getSrcValue(0));
  DAG.setRoot(DAG.getNode(ISD::BRIND, MVT::Other, LD.getValue(1), LD));
}

void SelectionDAGLowering::visitSwitch(SwitchInst &I) {
  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  // If there is only the default destination, branch to it if it is not the
  // next basic block. Otherwise, just fall through.
  if (I.getNumOperands() == 2) {
    // Update machine-CFG edges.
    MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[I.getDefaultDest()];
    // If this is not a fall-through branch, emit the branch.
    if (DefaultMBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(DefaultMBB)));
    return;
  }

  // If there are any non-default case statements, create a vector of Cases
  // representing each one, and sort the vector so that we can efficiently
  // create a binary search tree from them.
  std::vector<Case> Cases;
  for (unsigned i = 1; i < I.getNumSuccessors(); ++i) {
    MachineBasicBlock *SMBB = FuncInfo.MBBMap[I.getSuccessor(i)];
    Cases.push_back(Case(I.getSuccessorValue(i), SMBB));
  }
  std::sort(Cases.begin(), Cases.end(), CaseCmp());

  // Get the Value to be switched on and default basic blocks, which will be
  // inserted into CaseBlock records, representing basic blocks in the binary
  // search tree.
  Value *SV = I.getOperand(0);
  MachineBasicBlock *Default = FuncInfo.MBBMap[I.getDefaultDest()];

  // Get the MachineFunction which holds the current MBB. This is used during
  // emission of jump tables, and when inserting any additional MBBs necessary
  // to represent the switch.
  MachineFunction *CurMF = CurMBB->getParent();
  const BasicBlock *LLVMBB = CurMBB->getBasicBlock();
  Reloc::Model Relocs = TLI.getTargetMachine().getRelocationModel();

  // If the switch has more than 5 blocks and is at least 75% dense, then emit
  // a jump table rather than lowering the switch to a binary tree of
  // conditional branches.
  // FIXME: Make this work with PIC code
  if (TLI.isOperationLegal(ISD::BRIND, TLI.getPointerTy()) &&
      (Relocs == Reloc::Static || Relocs == Reloc::DynamicNoPIC) &&
      Cases.size() > 5) {
    uint64_t First = cast<ConstantIntegral>(Cases.front().first)->getRawValue();
    uint64_t Last  = cast<ConstantIntegral>(Cases.back().first)->getRawValue();
    double Density = (double)Cases.size() / (double)((Last - First) + 1ULL);

    if (Density >= 0.75) {
      // Create a new basic block to hold the code for loading the address
      // of the jump table, and jumping to it. Update successor information;
      // we will either branch to the default case for the switch, or the jump
      // table.
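      // (Illustration: a switch with 6 cases whose values span 10..17 has
      //  density 6/8 = 0.75, just dense enough to be lowered through this
      //  jump table path.)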
      MachineBasicBlock *JumpTableBB = new MachineBasicBlock(LLVMBB);
      CurMF->getBasicBlockList().insert(BBI, JumpTableBB);
      CurMBB->addSuccessor(Default);
      CurMBB->addSuccessor(JumpTableBB);

      // Subtract the lowest switch case value from the value being switched
      // on and conditionally branch to the default mbb if the result is
      // greater than the difference between the smallest and largest cases.
      SDOperand SwitchOp = getValue(SV);
      MVT::ValueType VT = SwitchOp.getValueType();
      SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp,
                                  DAG.getConstant(First, VT));

      // The SDNode we just created, which holds the value being switched on
      // minus the smallest case value, needs to be copied to a virtual
      // register so it can be used as an index into the jump table in a
      // subsequent basic block. This value may be smaller or larger than the
      // target's pointer type, and may therefore require extension or
      // truncation.
      if (VT > TLI.getPointerTy())
        SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB);
      else
        SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB);
      unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
      SDOperand CopyTo = DAG.getCopyToReg(getRoot(), JumpTableReg, SwitchOp);

      // Emit the range check for the jump table, and branch to the default
      // block for the switch statement if the value being switched on exceeds
      // the largest case in the switch.
      SDOperand CMP = DAG.getSetCC(TLI.getSetCCResultTy(), SUB,
                                   DAG.getConstant(Last-First,VT), ISD::SETUGT);
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP,
                              DAG.getBasicBlock(Default)));

      // Build a vector of destination BBs, corresponding to each target
      // of the jump table. If the value of the jump table slot corresponds to
      // a case statement, push the case's BB onto the vector, otherwise, push
      // the default BB.
      std::set<MachineBasicBlock*> UniqueBBs;
      std::vector<MachineBasicBlock*> DestBBs;
      uint64_t TEI = First;
      for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++TEI) {
        if (cast<ConstantIntegral>(ii->first)->getRawValue() == TEI) {
          DestBBs.push_back(ii->second);
          UniqueBBs.insert(ii->second);
          ++ii;
        } else {
          DestBBs.push_back(Default);
          UniqueBBs.insert(Default);
        }
      }

      // Update successor info
      for (std::set<MachineBasicBlock*>::iterator ii = UniqueBBs.begin(),
           ee = UniqueBBs.end(); ii != ee; ++ii)
        JumpTableBB->addSuccessor(*ii);

      // Create a jump table index for this jump table, or return an existing
      // one.
      unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);

      // Set the jump table information so that we can codegen it as a second
      // MachineBasicBlock
      JT.Reg = JumpTableReg;
      JT.JTI = JTI;
      JT.MBB = JumpTableBB;
      JT.Default = Default;
      return;
    }
  }

  // Push the initial CaseRec onto the worklist
  std::vector<CaseRec> CaseVec;
  CaseVec.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));

  while (!CaseVec.empty()) {
    // Grab a record representing a case range to process off the worklist
    CaseRec CR = CaseVec.back();
    CaseVec.pop_back();

    // Size is the number of Cases represented by this range. If Size is 1,
    // then we are processing a leaf of the binary search tree. Otherwise,
    // we need to pick a pivot, and push left and right ranges onto the
    // worklist.
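    // (For example, with the sorted cases {1, 2, 3, 10}, the pivot is the
    //  third case, 3; the range is split into {1, 2} and {3, 10}, and a new
    //  CaseRec is pushed for each half.)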
    unsigned Size = CR.Range.second - CR.Range.first;

    if (Size == 1) {
      // Create a CaseBlock record representing a conditional branch to
      // the Case's target mbb if the value being switched on SV is equal
      // to C. Otherwise, branch to default.
      Constant *C = CR.Range.first->first;
      MachineBasicBlock *Target = CR.Range.first->second;
      SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, C, Target, Default,
                                     CR.CaseBB);
      // If the MBB representing the leaf node is the current MBB, then just
      // call visitSwitchCase to emit the code into the current block.
      // Otherwise, push the CaseBlock onto the vector to be later processed
      // by SDISel, and insert the node's MBB before the next MBB.
      if (CR.CaseBB == CurMBB)
        visitSwitchCase(CB);
      else {
        SwitchCases.push_back(CB);
        CurMF->getBasicBlockList().insert(BBI, CR.CaseBB);
      }
    } else {
      // split case range at pivot
      CaseItr Pivot = CR.Range.first + (Size / 2);
      CaseRange LHSR(CR.Range.first, Pivot);
      CaseRange RHSR(Pivot, CR.Range.second);
      Constant *C = Pivot->first;
      MachineBasicBlock *RHSBB = 0, *LHSBB = 0;
      // We know that we branch to the LHS if the Value being switched on is
      // less than the Pivot value, C. We use this to optimize our binary
      // tree a bit, by recognizing that if SV is greater than or equal to the
      // LHS's Case Value, and that Case Value is exactly one less than the
      // Pivot's Value, then we can branch directly to the LHS's Target,
      // rather than creating a leaf node for it.
      if ((LHSR.second - LHSR.first) == 1 &&
          LHSR.first->first == CR.GE &&
          cast<ConstantIntegral>(C)->getRawValue() ==
          (cast<ConstantIntegral>(CR.GE)->getRawValue() + 1ULL)) {
        LHSBB = LHSR.first->second;
      } else {
        LHSBB = new MachineBasicBlock(LLVMBB);
        CaseVec.push_back(CaseRec(LHSBB,C,CR.GE,LHSR));
      }
      // Similar to the optimization above, if the Value being switched on is
      // known to be less than the Constant CR.LT, and the current Case Value
      // is CR.LT - 1, then we can branch directly to the target block for
      // the current Case Value, rather than emitting a RHS leaf node for it.
      if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
          cast<ConstantIntegral>(RHSR.first->first)->getRawValue() ==
          (cast<ConstantIntegral>(CR.LT)->getRawValue() - 1ULL)) {
        RHSBB = RHSR.first->second;
      } else {
        RHSBB = new MachineBasicBlock(LLVMBB);
        CaseVec.push_back(CaseRec(RHSBB,CR.LT,C,RHSR));
      }
      // Create a CaseBlock record representing a conditional branch to
      // the LHS node if the value being switched on SV is less than C.
      // Otherwise, branch to RHS.
      ISD::CondCode CC = C->getType()->isSigned() ?
                             ISD::SETLT : ISD::SETULT;
      SelectionDAGISel::CaseBlock CB(CC, SV, C, LHSBB, RHSBB, CR.CaseBB);
      if (CR.CaseBB == CurMBB)
        visitSwitchCase(CB);
      else {
        SwitchCases.push_back(CB);
        CurMF->getBasicBlockList().insert(BBI, CR.CaseBB);
      }
    }
  }
}

void SelectionDAGLowering::visitSub(User &I) {
  // -0.0 - X --> fneg
  if (I.getType()->isFloatingPoint()) {
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
      if (CFP->isExactlyValue(-0.0)) {
        SDOperand Op2 = getValue(I.getOperand(1));
        setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
        return;
      }
  }
  visitBinary(I, ISD::SUB, ISD::FSUB, ISD::VSUB);
}

void SelectionDAGLowering::visitBinary(User &I, unsigned IntOp, unsigned FPOp,
                                       unsigned VecOp) {
  const Type *Ty = I.getType();
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  if (Ty->isIntegral()) {
    setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
  } else if (Ty->isFloatingPoint()) {
    setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
  } else {
    const PackedType *PTy = cast<PackedType>(Ty);
    SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
    SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
    setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
  }
}

void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);

  setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
}

void SelectionDAGLowering::visitSetCC(User &I, ISD::CondCode SignedOpcode,
                                      ISD::CondCode UnsignedOpcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode = SignedOpcode;
  if (I.getOperand(0)->getType()->isUnsigned())
    Opcode = UnsignedOpcode;
  setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
}

void SelectionDAGLowering::visitSelect(User &I) {
  SDOperand Cond = getValue(I.getOperand(0));
  SDOperand TrueVal = getValue(I.getOperand(1));
  SDOperand FalseVal = getValue(I.getOperand(2));
  if (!isa<PackedType>(I.getType())) {
    setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
                             TrueVal, FalseVal));
  } else {
    setValue(&I, DAG.getNode(ISD::VSELECT, MVT::Vector, Cond, TrueVal, FalseVal,
                             *(TrueVal.Val->op_end()-2),
                             *(TrueVal.Val->op_end()-1)));
  }
}

void SelectionDAGLowering::visitCast(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType SrcVT = N.getValueType();
  MVT::ValueType DestVT = TLI.getValueType(I.getType());

  if (DestVT == MVT::Vector) {
    // This is a cast to a vector from something else. This is always a bit
    // convert. Get information about the input vector.
    const PackedType *DestTy = cast<PackedType>(I.getType());
    MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
    setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N,
                             DAG.getConstant(DestTy->getNumElements(),MVT::i32),
                             DAG.getValueType(EltVT)));
  } else if (SrcVT == DestVT) {
    setValue(&I, N);  // noop cast.
  } else if (DestVT == MVT::i1) {
    // Cast to bool is a comparison against zero, not truncation to zero.
    SDOperand Zero = isInteger(SrcVT) ? DAG.getConstant(0, N.getValueType()) :
                                        DAG.getConstantFP(0.0, N.getValueType());
    setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE));
  } else if (isInteger(SrcVT)) {
    if (isInteger(DestVT)) {        // Int -> Int cast
      if (DestVT < SrcVT)           // Truncating cast?
        setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
      else if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
    } else if (isFloatingPoint(DestVT)) {   // Int -> FP cast
      if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
    } else {
      assert(0 && "Unknown cast!");
    }
  } else if (isFloatingPoint(SrcVT)) {
    if (isFloatingPoint(DestVT)) {  // FP -> FP cast
      if (DestVT < SrcVT)           // Rounding cast?
        setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
    } else if (isInteger(DestVT)) { // FP -> Int cast.
      if (I.getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
    } else {
      assert(0 && "Unknown cast!");
    }
  } else {
    assert(SrcVT == MVT::Vector && "Unknown cast!");
    assert(DestVT != MVT::Vector && "Casts to vector already handled!");
    // This is a cast from a vector to something else. This is always a bit
    // convert. Get information about the input vector.
    setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N));
  }
}

void SelectionDAGLowering::visitInsertElement(User &I) {
  SDOperand InVec = getValue(I.getOperand(0));
  SDOperand InVal = getValue(I.getOperand(1));
  SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
                                getValue(I.getOperand(2)));

  SDOperand Num = *(InVec.Val->op_end()-2);
  SDOperand Typ = *(InVec.Val->op_end()-1);
  setValue(&I, DAG.getNode(ISD::VINSERT_VECTOR_ELT, MVT::Vector,
                           InVec, InVal, InIdx, Num, Typ));
}

void SelectionDAGLowering::visitExtractElement(User &I) {
  SDOperand InVec = getValue(I.getOperand(0));
  SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
                                getValue(I.getOperand(1)));
  SDOperand Typ = *(InVec.Val->op_end()-1);
  setValue(&I, DAG.getNode(ISD::VEXTRACT_VECTOR_ELT,
                           TLI.getValueType(I.getType()), InVec, InIdx));
}

void SelectionDAGLowering::visitShuffleVector(User &I) {
  SDOperand V1 = getValue(I.getOperand(0));
  SDOperand V2 = getValue(I.getOperand(1));
  SDOperand Mask = getValue(I.getOperand(2));

  SDOperand Num = *(V1.Val->op_end()-2);
  SDOperand Typ = *(V2.Val->op_end()-1);
  setValue(&I, DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector,
                           V1, V2, Mask, Num, Typ));
}


void SelectionDAGLowering::visitGetElementPtr(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  const Type *Ty = I.getOperand(0)->getType();
  const Type *UIntPtrTy = TD->getIntPtrType();

  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantUInt>(Idx)->getValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offset = TD->getStructLayout(StTy)->MemberOffsets[Field];
        N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                        getIntPtrConstant(Offset));
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getRawValue() == 0) continue;

        uint64_t Offs;
        if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
          Offs = (int64_t)TD->getTypeSize(Ty)*CSI->getValue();
        else
          Offs = TD->getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD->getTypeSize(Ty);
      SDOperand IdxN = getValue(Idx);

      // If the index is smaller or larger than intptr_t, truncate or extend
      // it.
      if (IdxN.getValueType() < N.getValueType()) {
        if (Idx->getType()->isSigned())
          IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
        else
          IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN);
      } else if (IdxN.getValueType() > N.getValueType())
        IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);

      // If this is a multiply by a power of two, turn it into a shl
      // immediately. This is a very common case.
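      // (For example, indexing into an array of i32 gives ElementSize == 4,
      //  so "Idx * 4" becomes "Idx << 2".)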
      if (isPowerOf2_64(ElementSize)) {
        unsigned Amt = Log2_64(ElementSize);
        IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
                           DAG.getConstant(Amt, TLI.getShiftAmountTy()));
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
        continue;
      }

      SDOperand Scale = getIntPtrConstant(ElementSize);
      IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
      N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
    }
  }
  setValue(&I, N);
}

void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
  // If this is a fixed sized alloca in the entry block of the function,
  // allocate it statically on the stack.
  if (FuncInfo.StaticAllocaMap.count(&I))
    return;   // getValue will auto-populate this.

  const Type *Ty = I.getAllocatedType();
  uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
  unsigned Align = std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
                            I.getAlignment());

  SDOperand AllocSize = getValue(I.getArraySize());
  MVT::ValueType IntPtr = TLI.getPointerTy();
  if (IntPtr < AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
  else if (IntPtr > AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);

  AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
                          getIntPtrConstant(TySize));

  // Handle alignment. If the requested alignment is less than or equal to the
  // stack alignment, ignore it and round the size of the allocation up to the
  // stack alignment size. If the requested alignment is greater than the stack
  // alignment, we note this in the DYNAMIC_STACKALLOC node.
  unsigned StackAlign =
    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
  if (Align <= StackAlign) {
    Align = 0;
    // Add SA-1 to the size.
    AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(StackAlign-1));
    // Mask out the low bits for alignment purposes.
    AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(~(uint64_t)(StackAlign-1)));
  }

  std::vector<MVT::ValueType> VTs;
  VTs.push_back(AllocSize.getValueType());
  VTs.push_back(MVT::Other);
  std::vector<SDOperand> Ops;
  Ops.push_back(getRoot());
  Ops.push_back(AllocSize);
  Ops.push_back(getIntPtrConstant(Align));
  SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, Ops);
  DAG.setRoot(setValue(&I, DSA).getValue(1));

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
}

void SelectionDAGLowering::visitLoad(LoadInst &I) {
  SDOperand Ptr = getValue(I.getOperand(0));

  SDOperand Root;
  if (I.isVolatile())
    Root = getRoot();
  else {
    // Do not serialize non-volatile loads against each other.
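    // Instead, their chains are collected in PendingLoads by getLoadFrom and
    // merged into a single TokenFactor the next time getRoot() is called.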
    Root = DAG.getRoot();
  }

  setValue(&I, getLoadFrom(I.getType(), Ptr, DAG.getSrcValue(I.getOperand(0)),
                           Root, I.isVolatile()));
}

SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr,
                                            SDOperand SrcValue, SDOperand Root,
                                            bool isVolatile) {
  SDOperand L;
  if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
    MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
    L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr, SrcValue);
  } else {
    L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SrcValue);
  }

  if (isVolatile)
    DAG.setRoot(L.getValue(1));
  else
    PendingLoads.push_back(L.getValue(1));

  return L;
}


void SelectionDAGLowering::visitStore(StoreInst &I) {
  Value *SrcV = I.getOperand(0);
  SDOperand Src = getValue(SrcV);
  SDOperand Ptr = getValue(I.getOperand(1));
  DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr,
                          DAG.getSrcValue(I.getOperand(1))));
}

/// IntrinsicCannotAccessMemory - Return true if the specified intrinsic cannot
/// access memory and has no other side effects at all.
static bool IntrinsicCannotAccessMemory(unsigned IntrinsicID) {
#define GET_NO_MEMORY_INTRINSICS
#include "llvm/Intrinsics.gen"
#undef GET_NO_MEMORY_INTRINSICS
  return false;
}

// IntrinsicOnlyReadsMemory - Return true if the specified intrinsic doesn't
// have any side-effects or if it only reads memory.
static bool IntrinsicOnlyReadsMemory(unsigned IntrinsicID) {
#define GET_SIDE_EFFECT_INFO
#include "llvm/Intrinsics.gen"
#undef GET_SIDE_EFFECT_INFO
  return false;
}

/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
                                                unsigned Intrinsic) {
  bool HasChain = !IntrinsicCannotAccessMemory(Intrinsic);
  bool OnlyLoad = HasChain && IntrinsicOnlyReadsMemory(Intrinsic);

  // Build the operand list.
  std::vector<SDOperand> Ops;
  if (HasChain) {   // If this intrinsic has side-effects, chainify it.
    if (OnlyLoad) {
      // We don't need to serialize loads against other loads.
      Ops.push_back(DAG.getRoot());
    } else {
      Ops.push_back(getRoot());
    }
  }

  // Add the intrinsic ID as an integer operand.
  Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));

  // Add all operands of the call to the operand list.
  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
    SDOperand Op = getValue(I.getOperand(i));

    // If this is a vector type, force it to the right packed type.
    if (Op.getValueType() == MVT::Vector) {
      const PackedType *OpTy = cast<PackedType>(I.getOperand(i)->getType());
      MVT::ValueType EltVT = TLI.getValueType(OpTy->getElementType());

      MVT::ValueType VVT = MVT::getVectorType(EltVT, OpTy->getNumElements());
      assert(VVT != MVT::Other && "Intrinsic uses a non-legal type?");
      Op = DAG.getNode(ISD::VBIT_CONVERT, VVT, Op);
    }

    assert(TLI.isTypeLegal(Op.getValueType()) &&
           "Intrinsic uses a non-legal type?");
    Ops.push_back(Op);
  }

  std::vector<MVT::ValueType> VTs;
  if (I.getType() != Type::VoidTy) {
    MVT::ValueType VT = TLI.getValueType(I.getType());
    if (VT == MVT::Vector) {
      const PackedType *DestTy = cast<PackedType>(I.getType());
      MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());

      VT = MVT::getVectorType(EltVT, DestTy->getNumElements());
      assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
    }

    assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
    VTs.push_back(VT);
  }
  if (HasChain)
    VTs.push_back(MVT::Other);

  // Create the node.
  SDOperand Result;
  if (!HasChain)
    Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTs, Ops);
  else if (I.getType() != Type::VoidTy)
    Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTs, Ops);
  else
    Result = DAG.getNode(ISD::INTRINSIC_VOID, VTs, Ops);

  if (HasChain) {
    SDOperand Chain = Result.getValue(Result.Val->getNumValues()-1);
    if (OnlyLoad)
      PendingLoads.push_back(Chain);
    else
      DAG.setRoot(Chain);
  }
  if (I.getType() != Type::VoidTy) {
    if (const PackedType *PTy = dyn_cast<PackedType>(I.getType())) {
      MVT::ValueType EVT = TLI.getValueType(PTy->getElementType());
      Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result,
                           DAG.getConstant(PTy->getNumElements(), MVT::i32),
                           DAG.getValueType(EVT));
    }
    setValue(&I, Result);
  }
}

/// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
/// we want to emit this as a call to a named external function, return the
/// name; otherwise lower it and return null.
const char *
SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
  switch (Intrinsic) {
  default:
    // By default, turn this into a target intrinsic node.
1477 visitTargetIntrinsic(I, Intrinsic); 1478 return 0; 1479 case Intrinsic::vastart: visitVAStart(I); return 0; 1480 case Intrinsic::vaend: visitVAEnd(I); return 0; 1481 case Intrinsic::vacopy: visitVACopy(I); return 0; 1482 case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0; 1483 case Intrinsic::frameaddress: visitFrameReturnAddress(I, true); return 0; 1484 case Intrinsic::setjmp: 1485 return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp(); 1486 break; 1487 case Intrinsic::longjmp: 1488 return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp(); 1489 break; 1490 case Intrinsic::memcpy_i32: 1491 case Intrinsic::memcpy_i64: 1492 visitMemIntrinsic(I, ISD::MEMCPY); 1493 return 0; 1494 case Intrinsic::memset_i32: 1495 case Intrinsic::memset_i64: 1496 visitMemIntrinsic(I, ISD::MEMSET); 1497 return 0; 1498 case Intrinsic::memmove_i32: 1499 case Intrinsic::memmove_i64: 1500 visitMemIntrinsic(I, ISD::MEMMOVE); 1501 return 0; 1502 1503 case Intrinsic::dbg_stoppoint: { 1504 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1505 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I); 1506 if (DebugInfo && SPI.getContext() && DebugInfo->Verify(SPI.getContext())) { 1507 std::vector<SDOperand> Ops; 1508 1509 Ops.push_back(getRoot()); 1510 Ops.push_back(getValue(SPI.getLineValue())); 1511 Ops.push_back(getValue(SPI.getColumnValue())); 1512 1513 DebugInfoDesc *DD = DebugInfo->getDescFor(SPI.getContext()); 1514 assert(DD && "Not a debug information descriptor"); 1515 CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD); 1516 1517 Ops.push_back(DAG.getString(CompileUnit->getFileName())); 1518 Ops.push_back(DAG.getString(CompileUnit->getDirectory())); 1519 1520 DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops)); 1521 } 1522 1523 return 0; 1524 } 1525 case Intrinsic::dbg_region_start: { 1526 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1527 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I); 1528 if (DebugInfo && RSI.getContext() && DebugInfo->Verify(RSI.getContext())) { 1529 std::vector<SDOperand> Ops; 1530 1531 unsigned LabelID = DebugInfo->RecordRegionStart(RSI.getContext()); 1532 1533 Ops.push_back(getRoot()); 1534 Ops.push_back(DAG.getConstant(LabelID, MVT::i32)); 1535 1536 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops)); 1537 } 1538 1539 return 0; 1540 } 1541 case Intrinsic::dbg_region_end: { 1542 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1543 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I); 1544 if (DebugInfo && REI.getContext() && DebugInfo->Verify(REI.getContext())) { 1545 std::vector<SDOperand> Ops; 1546 1547 unsigned LabelID = DebugInfo->RecordRegionEnd(REI.getContext()); 1548 1549 Ops.push_back(getRoot()); 1550 Ops.push_back(DAG.getConstant(LabelID, MVT::i32)); 1551 1552 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops)); 1553 } 1554 1555 return 0; 1556 } 1557 case Intrinsic::dbg_func_start: { 1558 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1559 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I); 1560 if (DebugInfo && FSI.getSubprogram() && 1561 DebugInfo->Verify(FSI.getSubprogram())) { 1562 std::vector<SDOperand> Ops; 1563 1564 unsigned LabelID = DebugInfo->RecordRegionStart(FSI.getSubprogram()); 1565 1566 Ops.push_back(getRoot()); 1567 Ops.push_back(DAG.getConstant(LabelID, MVT::i32)); 1568 1569 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops)); 1570 } 1571 1572 return 0; 1573 } 1574 case Intrinsic::dbg_declare: { 1575 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1576 
DbgDeclareInst &DI = cast<DbgDeclareInst>(I); 1577 if (DebugInfo && DI.getVariable() && DebugInfo->Verify(DI.getVariable())) { 1578 std::vector<SDOperand> Ops; 1579 1580 SDOperand AddressOp = getValue(DI.getAddress()); 1581 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp)) { 1582 DebugInfo->RecordVariable(DI.getVariable(), FI->getIndex()); 1583 } 1584 } 1585 1586 return 0; 1587 } 1588 1589 case Intrinsic::isunordered_f32: 1590 case Intrinsic::isunordered_f64: 1591 setValue(&I, DAG.getSetCC(MVT::i1,getValue(I.getOperand(1)), 1592 getValue(I.getOperand(2)), ISD::SETUO)); 1593 return 0; 1594 1595 case Intrinsic::sqrt_f32: 1596 case Intrinsic::sqrt_f64: 1597 setValue(&I, DAG.getNode(ISD::FSQRT, 1598 getValue(I.getOperand(1)).getValueType(), 1599 getValue(I.getOperand(1)))); 1600 return 0; 1601 case Intrinsic::pcmarker: { 1602 SDOperand Tmp = getValue(I.getOperand(1)); 1603 DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp)); 1604 return 0; 1605 } 1606 case Intrinsic::readcyclecounter: { 1607 std::vector<MVT::ValueType> VTs; 1608 VTs.push_back(MVT::i64); 1609 VTs.push_back(MVT::Other); 1610 std::vector<SDOperand> Ops; 1611 Ops.push_back(getRoot()); 1612 SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER, VTs, Ops); 1613 setValue(&I, Tmp); 1614 DAG.setRoot(Tmp.getValue(1)); 1615 return 0; 1616 } 1617 case Intrinsic::bswap_i16: 1618 case Intrinsic::bswap_i32: 1619 case Intrinsic::bswap_i64: 1620 setValue(&I, DAG.getNode(ISD::BSWAP, 1621 getValue(I.getOperand(1)).getValueType(), 1622 getValue(I.getOperand(1)))); 1623 return 0; 1624 case Intrinsic::cttz_i8: 1625 case Intrinsic::cttz_i16: 1626 case Intrinsic::cttz_i32: 1627 case Intrinsic::cttz_i64: 1628 setValue(&I, DAG.getNode(ISD::CTTZ, 1629 getValue(I.getOperand(1)).getValueType(), 1630 getValue(I.getOperand(1)))); 1631 return 0; 1632 case Intrinsic::ctlz_i8: 1633 case Intrinsic::ctlz_i16: 1634 case Intrinsic::ctlz_i32: 1635 case Intrinsic::ctlz_i64: 1636 setValue(&I, DAG.getNode(ISD::CTLZ, 1637 getValue(I.getOperand(1)).getValueType(), 1638 getValue(I.getOperand(1)))); 1639 return 0; 1640 case Intrinsic::ctpop_i8: 1641 case Intrinsic::ctpop_i16: 1642 case Intrinsic::ctpop_i32: 1643 case Intrinsic::ctpop_i64: 1644 setValue(&I, DAG.getNode(ISD::CTPOP, 1645 getValue(I.getOperand(1)).getValueType(), 1646 getValue(I.getOperand(1)))); 1647 return 0; 1648 case Intrinsic::stacksave: { 1649 std::vector<MVT::ValueType> VTs; 1650 VTs.push_back(TLI.getPointerTy()); 1651 VTs.push_back(MVT::Other); 1652 std::vector<SDOperand> Ops; 1653 Ops.push_back(getRoot()); 1654 SDOperand Tmp = DAG.getNode(ISD::STACKSAVE, VTs, Ops); 1655 setValue(&I, Tmp); 1656 DAG.setRoot(Tmp.getValue(1)); 1657 return 0; 1658 } 1659 case Intrinsic::stackrestore: { 1660 SDOperand Tmp = getValue(I.getOperand(1)); 1661 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp)); 1662 return 0; 1663 } 1664 case Intrinsic::prefetch: 1665 // FIXME: Currently discarding prefetches. 1666 return 0; 1667 } 1668} 1669 1670 1671void SelectionDAGLowering::visitCall(CallInst &I) { 1672 const char *RenameFn = 0; 1673 if (Function *F = I.getCalledFunction()) { 1674 if (F->isExternal()) 1675 if (unsigned IID = F->getIntrinsicID()) { 1676 RenameFn = visitIntrinsicCall(I, IID); 1677 if (!RenameFn) 1678 return; 1679 } else { // Not an LLVM intrinsic. 1680 const std::string &Name = F->getName(); 1681 if (Name[0] == 'c' && (Name == "copysign" || Name == "copysignf")) { 1682 if (I.getNumOperands() == 3 && // Basic sanity checks. 
1683               I.getOperand(1)->getType()->isFloatingPoint() &&
1684               I.getType() == I.getOperand(1)->getType() &&
1685               I.getType() == I.getOperand(2)->getType()) {
1686             SDOperand LHS = getValue(I.getOperand(1));
1687             SDOperand RHS = getValue(I.getOperand(2));
1688             setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(),
1689                                      LHS, RHS));
1690             return;
1691           }
1692         } else if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
1693           if (I.getNumOperands() == 2 &&   // Basic sanity checks.
1694               I.getOperand(1)->getType()->isFloatingPoint() &&
1695               I.getType() == I.getOperand(1)->getType()) {
1696             SDOperand Tmp = getValue(I.getOperand(1));
1697             setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
1698             return;
1699           }
1700         } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) {
1701           if (I.getNumOperands() == 2 &&   // Basic sanity checks.
1702               I.getOperand(1)->getType()->isFloatingPoint() &&
1703               I.getType() == I.getOperand(1)->getType()) {
1704             SDOperand Tmp = getValue(I.getOperand(1));
1705             setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
1706             return;
1707           }
1708         } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) {
1709           if (I.getNumOperands() == 2 &&   // Basic sanity checks.
1710               I.getOperand(1)->getType()->isFloatingPoint() &&
1711               I.getType() == I.getOperand(1)->getType()) {
1712             SDOperand Tmp = getValue(I.getOperand(1));
1713             setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
1714             return;
1715           }
1716         }
1717       }
1718   } else if (isa<InlineAsm>(I.getOperand(0))) {
1719     visitInlineAsm(I);
1720     return;
1721   }
1722
1723   SDOperand Callee;
1724   if (!RenameFn)
1725     Callee = getValue(I.getOperand(0));
1726   else
1727     Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
1728   std::vector<std::pair<SDOperand, const Type*> > Args;
1729   Args.reserve(I.getNumOperands());
1730   for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
1731     Value *Arg = I.getOperand(i);
1732     SDOperand ArgNode = getValue(Arg);
1733     Args.push_back(std::make_pair(ArgNode, Arg->getType()));
1734   }
1735
1736   const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
1737   const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
1738
1739   std::pair<SDOperand,SDOperand> Result =
1740     TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), I.getCallingConv(),
1741                     I.isTailCall(), Callee, Args, DAG);
1742   if (I.getType() != Type::VoidTy)
1743     setValue(&I, Result.first);
1744   DAG.setRoot(Result.second);
1745 }
1746
1747 SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
1748                                         SDOperand &Chain, SDOperand &Flag)const{
1749   SDOperand Val = DAG.getCopyFromReg(Chain, Regs[0], RegVT, Flag);
1750   Chain = Val.getValue(1);
1751   Flag = Val.getValue(2);
1752
1753   // If the result was expanded, copy from the top part.
1754   if (Regs.size() > 1) {
1755     assert(Regs.size() == 2 &&
1756            "Cannot expand to more than 2 elts yet!");
1757     SDOperand Hi = DAG.getCopyFromReg(Chain, Regs[1], RegVT, Flag);
1758     Chain = Hi.getValue(1);
1759     Flag = Hi.getValue(2);
1760     if (DAG.getTargetLoweringInfo().isLittleEndian())
1761       return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Val, Hi);
1762     else
1763       return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Hi, Val);
1764   }
1765
1766   // Otherwise, if the return value was promoted, truncate it to the
1767   // appropriate type.
1768 if (RegVT == ValueVT) 1769 return Val; 1770 1771 if (MVT::isInteger(RegVT)) 1772 return DAG.getNode(ISD::TRUNCATE, ValueVT, Val); 1773 else 1774 return DAG.getNode(ISD::FP_ROUND, ValueVT, Val); 1775} 1776 1777/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the 1778/// specified value into the registers specified by this object. This uses 1779/// Chain/Flag as the input and updates them for the output Chain/Flag. 1780void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG, 1781 SDOperand &Chain, SDOperand &Flag) const { 1782 if (Regs.size() == 1) { 1783 // If there is a single register and the types differ, this must be 1784 // a promotion. 1785 if (RegVT != ValueVT) { 1786 if (MVT::isInteger(RegVT)) 1787 Val = DAG.getNode(ISD::ANY_EXTEND, RegVT, Val); 1788 else 1789 Val = DAG.getNode(ISD::FP_EXTEND, RegVT, Val); 1790 } 1791 Chain = DAG.getCopyToReg(Chain, Regs[0], Val, Flag); 1792 Flag = Chain.getValue(1); 1793 } else { 1794 std::vector<unsigned> R(Regs); 1795 if (!DAG.getTargetLoweringInfo().isLittleEndian()) 1796 std::reverse(R.begin(), R.end()); 1797 1798 for (unsigned i = 0, e = R.size(); i != e; ++i) { 1799 SDOperand Part = DAG.getNode(ISD::EXTRACT_ELEMENT, RegVT, Val, 1800 DAG.getConstant(i, MVT::i32)); 1801 Chain = DAG.getCopyToReg(Chain, R[i], Part, Flag); 1802 Flag = Chain.getValue(1); 1803 } 1804 } 1805} 1806 1807/// AddInlineAsmOperands - Add this value to the specified inlineasm node 1808/// operand list. This adds the code marker and includes the number of 1809/// values added into it. 1810void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG, 1811 std::vector<SDOperand> &Ops) const { 1812 Ops.push_back(DAG.getConstant(Code | (Regs.size() << 3), MVT::i32)); 1813 for (unsigned i = 0, e = Regs.size(); i != e; ++i) 1814 Ops.push_back(DAG.getRegister(Regs[i], RegVT)); 1815} 1816 1817/// isAllocatableRegister - If the specified register is safe to allocate, 1818/// i.e. it isn't a stack pointer or some other special register, return the 1819/// register class for the register. Otherwise, return null. 1820static const TargetRegisterClass * 1821isAllocatableRegister(unsigned Reg, MachineFunction &MF, 1822 const TargetLowering &TLI, const MRegisterInfo *MRI) { 1823 MVT::ValueType FoundVT = MVT::Other; 1824 const TargetRegisterClass *FoundRC = 0; 1825 for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(), 1826 E = MRI->regclass_end(); RCI != E; ++RCI) { 1827 MVT::ValueType ThisVT = MVT::Other; 1828 1829 const TargetRegisterClass *RC = *RCI; 1830 // If none of the the value types for this register class are valid, we 1831 // can't use it. For example, 64-bit reg classes on 32-bit targets. 1832 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end(); 1833 I != E; ++I) { 1834 if (TLI.isTypeLegal(*I)) { 1835 // If we have already found this register in a different register class, 1836 // choose the one with the largest VT specified. For example, on 1837 // PowerPC, we favor f64 register classes over f32. 1838 if (FoundVT == MVT::Other || 1839 MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) { 1840 ThisVT = *I; 1841 break; 1842 } 1843 } 1844 } 1845 1846 if (ThisVT == MVT::Other) continue; 1847 1848 // NOTE: This isn't ideal. In particular, this might allocate the 1849 // frame pointer in functions that need it (due to them not being taken 1850 // out of allocation, because a variable sized allocation hasn't been seen 1851 // yet). This is a slight code pessimization, but should still work. 
1852 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF), 1853 E = RC->allocation_order_end(MF); I != E; ++I) 1854 if (*I == Reg) { 1855 // We found a matching register class. Keep looking at others in case 1856 // we find one with larger registers that this physreg is also in. 1857 FoundRC = RC; 1858 FoundVT = ThisVT; 1859 break; 1860 } 1861 } 1862 return FoundRC; 1863} 1864 1865RegsForValue SelectionDAGLowering:: 1866GetRegistersForValue(const std::string &ConstrCode, 1867 MVT::ValueType VT, bool isOutReg, bool isInReg, 1868 std::set<unsigned> &OutputRegs, 1869 std::set<unsigned> &InputRegs) { 1870 std::pair<unsigned, const TargetRegisterClass*> PhysReg = 1871 TLI.getRegForInlineAsmConstraint(ConstrCode, VT); 1872 std::vector<unsigned> Regs; 1873 1874 unsigned NumRegs = VT != MVT::Other ? TLI.getNumElements(VT) : 1; 1875 MVT::ValueType RegVT; 1876 MVT::ValueType ValueVT = VT; 1877 1878 if (PhysReg.first) { 1879 if (VT == MVT::Other) 1880 ValueVT = *PhysReg.second->vt_begin(); 1881 RegVT = VT; 1882 1883 // This is a explicit reference to a physical register. 1884 Regs.push_back(PhysReg.first); 1885 1886 // If this is an expanded reference, add the rest of the regs to Regs. 1887 if (NumRegs != 1) { 1888 RegVT = *PhysReg.second->vt_begin(); 1889 TargetRegisterClass::iterator I = PhysReg.second->begin(); 1890 TargetRegisterClass::iterator E = PhysReg.second->end(); 1891 for (; *I != PhysReg.first; ++I) 1892 assert(I != E && "Didn't find reg!"); 1893 1894 // Already added the first reg. 1895 --NumRegs; ++I; 1896 for (; NumRegs; --NumRegs, ++I) { 1897 assert(I != E && "Ran out of registers to allocate!"); 1898 Regs.push_back(*I); 1899 } 1900 } 1901 return RegsForValue(Regs, RegVT, ValueVT); 1902 } 1903 1904 // This is a reference to a register class. Allocate NumRegs consecutive, 1905 // available, registers from the class. 1906 std::vector<unsigned> RegClassRegs = 1907 TLI.getRegClassForInlineAsmConstraint(ConstrCode, VT); 1908 1909 const MRegisterInfo *MRI = DAG.getTarget().getRegisterInfo(); 1910 MachineFunction &MF = *CurMBB->getParent(); 1911 unsigned NumAllocated = 0; 1912 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) { 1913 unsigned Reg = RegClassRegs[i]; 1914 // See if this register is available. 1915 if ((isOutReg && OutputRegs.count(Reg)) || // Already used. 1916 (isInReg && InputRegs.count(Reg))) { // Already used. 1917 // Make sure we find consecutive registers. 1918 NumAllocated = 0; 1919 continue; 1920 } 1921 1922 // Check to see if this register is allocatable (i.e. don't give out the 1923 // stack pointer). 1924 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, MRI); 1925 if (!RC) { 1926 // Make sure we find consecutive registers. 1927 NumAllocated = 0; 1928 continue; 1929 } 1930 1931 // Okay, this register is good, we can use it. 1932 ++NumAllocated; 1933 1934 // If we allocated enough consecutive 1935 if (NumAllocated == NumRegs) { 1936 unsigned RegStart = (i-NumAllocated)+1; 1937 unsigned RegEnd = i+1; 1938 // Mark all of the allocated registers used. 1939 for (unsigned i = RegStart; i != RegEnd; ++i) { 1940 unsigned Reg = RegClassRegs[i]; 1941 Regs.push_back(Reg); 1942 if (isOutReg) OutputRegs.insert(Reg); // Mark reg used. 1943 if (isInReg) InputRegs.insert(Reg); // Mark reg used. 1944 } 1945 1946 return RegsForValue(Regs, *RC->vt_begin(), VT); 1947 } 1948 } 1949 1950 // Otherwise, we couldn't allocate enough registers for this. 
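  // An empty RegsForValue tells the caller that no run of NumRegs consecutive,
  // allocatable registers of the requested class was available.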
1951   return RegsForValue();
1952 }
1953
1954
1955 /// visitInlineAsm - Handle a call to an InlineAsm object.
1956 ///
1957 void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
1958   InlineAsm *IA = cast<InlineAsm>(I.getOperand(0));
1959
1960   SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
1961                                                  MVT::Other);
1962
1963   // Note, we treat inline asms both with and without side-effects as the same.
1964   // If an inline asm doesn't have side effects and doesn't access memory, we
1965   // could choose not to chain it.
1966   bool hasSideEffects = IA->hasSideEffects();
1967
1968   std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
1969   std::vector<MVT::ValueType> ConstraintVTs;
1970
1971   /// AsmNodeOperands - The operands of the INLINEASM node: the input chain,
1972   /// the asm string, and then one group per constraint, each introduced by a
1973   /// constant flag word describing the kind and number of operands that follow.
1974   std::vector<SDOperand> AsmNodeOperands;
1975   AsmNodeOperands.push_back(SDOperand());  // reserve space for input chain
1976   AsmNodeOperands.push_back(AsmStr);
1977
1978   SDOperand Chain = getRoot();
1979   SDOperand Flag;
1980
1981   // We fully assign registers here at isel time.  This is not optimal, but
1982   // should work.  For register classes that correspond to LLVM classes, we
1983   // could let the LLVM RA do its thing, but we currently don't.  Do a prepass
1984   // over the constraints, collecting fixed registers that we know we can't use.
1985   std::set<unsigned> OutputRegs, InputRegs;
1986   unsigned OpNum = 1;
1987   for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
1988     assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
1989     std::string &ConstraintCode = Constraints[i].Codes[0];
1990
1991     MVT::ValueType OpVT;
1992
1993     // Compute the value type for each operand and add it to ConstraintVTs.
1994     switch (Constraints[i].Type) {
1995     case InlineAsm::isOutput:
1996       if (!Constraints[i].isIndirectOutput) {
1997         assert(I.getType() != Type::VoidTy && "Bad inline asm!");
1998         OpVT = TLI.getValueType(I.getType());
1999       } else {
2000         const Type *OpTy = I.getOperand(OpNum)->getType();
2001         OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType());
2002         OpNum++;  // Consumes a call operand.
2003       }
2004       break;
2005     case InlineAsm::isInput:
2006       OpVT = TLI.getValueType(I.getOperand(OpNum)->getType());
2007       OpNum++;  // Consumes a call operand.
2008       break;
2009     case InlineAsm::isClobber:
2010       OpVT = MVT::Other;
2011       break;
2012     }
2013
2014     ConstraintVTs.push_back(OpVT);
2015
2016     if (TLI.getRegForInlineAsmConstraint(ConstraintCode, OpVT).first == 0)
2017       continue;  // Not assigned a fixed reg.
2018
2019     // Build a list of regs that this operand uses.  This always has a single
2020     // element for promoted/expanded operands.
2021     RegsForValue Regs = GetRegistersForValue(ConstraintCode, OpVT,
2022                                              false, false,
2023                                              OutputRegs, InputRegs);
2024
2025     switch (Constraints[i].Type) {
2026     case InlineAsm::isOutput:
2027       // We can't assign any other output to this register.
2028       OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2029       // If this is an early-clobber output, it cannot be assigned to the same
2030       // value as the input reg.
2031       if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
2032         InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2033       break;
2034     case InlineAsm::isInput:
2035       // We can't assign any other input to this register.
2036 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2037 break; 2038 case InlineAsm::isClobber: 2039 // Clobbered regs cannot be used as inputs or outputs. 2040 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2041 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 2042 break; 2043 } 2044 } 2045 2046 // Loop over all of the inputs, copying the operand values into the 2047 // appropriate registers and processing the output regs. 2048 RegsForValue RetValRegs; 2049 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit; 2050 OpNum = 1; 2051 2052 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) { 2053 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!"); 2054 std::string &ConstraintCode = Constraints[i].Codes[0]; 2055 2056 switch (Constraints[i].Type) { 2057 case InlineAsm::isOutput: { 2058 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass; 2059 if (ConstraintCode.size() == 1) // not a physreg name. 2060 CTy = TLI.getConstraintType(ConstraintCode[0]); 2061 2062 if (CTy == TargetLowering::C_Memory) { 2063 // Memory output. 2064 SDOperand InOperandVal = getValue(I.getOperand(OpNum)); 2065 2066 // Check that the operand (the address to store to) isn't a float. 2067 if (!MVT::isInteger(InOperandVal.getValueType())) 2068 assert(0 && "MATCH FAIL!"); 2069 2070 if (!Constraints[i].isIndirectOutput) 2071 assert(0 && "MATCH FAIL!"); 2072 2073 OpNum++; // Consumes a call operand. 2074 2075 // Extend/truncate to the right pointer type if needed. 2076 MVT::ValueType PtrType = TLI.getPointerTy(); 2077 if (InOperandVal.getValueType() < PtrType) 2078 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal); 2079 else if (InOperandVal.getValueType() > PtrType) 2080 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal); 2081 2082 // Add information to the INLINEASM node to know about this output. 2083 unsigned ResOpType = 4/*MEM*/ | (1 << 3); 2084 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32)); 2085 AsmNodeOperands.push_back(InOperandVal); 2086 break; 2087 } 2088 2089 // Otherwise, this is a register output. 2090 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!"); 2091 2092 // If this is an early-clobber output, or if there is an input 2093 // constraint that matches this, we need to reserve the input register 2094 // so no other inputs allocate to it. 2095 bool UsesInputRegister = false; 2096 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput) 2097 UsesInputRegister = true; 2098 2099 // Copy the output from the appropriate register. Find a register that 2100 // we can use. 2101 RegsForValue Regs = 2102 GetRegistersForValue(ConstraintCode, ConstraintVTs[i], 2103 true, UsesInputRegister, 2104 OutputRegs, InputRegs); 2105 assert(!Regs.Regs.empty() && "Couldn't allocate output reg!"); 2106 2107 if (!Constraints[i].isIndirectOutput) { 2108 assert(RetValRegs.Regs.empty() && 2109 "Cannot have multiple output constraints yet!"); 2110 assert(I.getType() != Type::VoidTy && "Bad inline asm!"); 2111 RetValRegs = Regs; 2112 } else { 2113 IndirectStoresToEmit.push_back(std::make_pair(Regs, 2114 I.getOperand(OpNum))); 2115 OpNum++; // Consumes a call operand. 2116 } 2117 2118 // Add information to the INLINEASM node to know that this register is 2119 // set. 
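      // set.  Each operand group on the INLINEASM node is introduced by a flag
      // word: the low 3 bits encode the kind (1 = register use, 2 = register
      // def, 3 = immediate, 4 = memory) and the remaining bits hold the number
      // of operands that follow, e.g. 2 | (Regs.size() << 3) for a REGDEF group.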
2120 Regs.AddInlineAsmOperands(2 /*REGDEF*/, DAG, AsmNodeOperands); 2121 break; 2122 } 2123 case InlineAsm::isInput: { 2124 SDOperand InOperandVal = getValue(I.getOperand(OpNum)); 2125 OpNum++; // Consumes a call operand. 2126 2127 if (isdigit(ConstraintCode[0])) { // Matching constraint? 2128 // If this is required to match an output register we have already set, 2129 // just use its register. 2130 unsigned OperandNo = atoi(ConstraintCode.c_str()); 2131 2132 // Scan until we find the definition we already emitted of this operand. 2133 // When we find it, create a RegsForValue operand. 2134 unsigned CurOp = 2; // The first operand. 2135 for (; OperandNo; --OperandNo) { 2136 // Advance to the next operand. 2137 unsigned NumOps = 2138 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue(); 2139 assert((NumOps & 7) == 2 /*REGDEF*/ && 2140 "Skipped past definitions?"); 2141 CurOp += (NumOps>>3)+1; 2142 } 2143 2144 unsigned NumOps = 2145 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue(); 2146 assert((NumOps & 7) == 2 /*REGDEF*/ && 2147 "Skipped past definitions?"); 2148 2149 // Add NumOps>>3 registers to MatchedRegs. 2150 RegsForValue MatchedRegs; 2151 MatchedRegs.ValueVT = InOperandVal.getValueType(); 2152 MatchedRegs.RegVT = AsmNodeOperands[CurOp+1].getValueType(); 2153 for (unsigned i = 0, e = NumOps>>3; i != e; ++i) { 2154 unsigned Reg=cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg(); 2155 MatchedRegs.Regs.push_back(Reg); 2156 } 2157 2158 // Use the produced MatchedRegs object to 2159 MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag); 2160 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands); 2161 break; 2162 } 2163 2164 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass; 2165 if (ConstraintCode.size() == 1) // not a physreg name. 2166 CTy = TLI.getConstraintType(ConstraintCode[0]); 2167 2168 if (CTy == TargetLowering::C_Other) { 2169 if (!TLI.isOperandValidForConstraint(InOperandVal, ConstraintCode[0])) 2170 assert(0 && "MATCH FAIL!"); 2171 2172 // Add information to the INLINEASM node to know about this input. 2173 unsigned ResOpType = 3 /*IMM*/ | (1 << 3); 2174 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32)); 2175 AsmNodeOperands.push_back(InOperandVal); 2176 break; 2177 } else if (CTy == TargetLowering::C_Memory) { 2178 // Memory input. 2179 2180 // Check that the operand isn't a float. 2181 if (!MVT::isInteger(InOperandVal.getValueType())) 2182 assert(0 && "MATCH FAIL!"); 2183 2184 // Extend/truncate to the right pointer type if needed. 2185 MVT::ValueType PtrType = TLI.getPointerTy(); 2186 if (InOperandVal.getValueType() < PtrType) 2187 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal); 2188 else if (InOperandVal.getValueType() > PtrType) 2189 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal); 2190 2191 // Add information to the INLINEASM node to know about this input. 2192 unsigned ResOpType = 4/*MEM*/ | (1 << 3); 2193 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32)); 2194 AsmNodeOperands.push_back(InOperandVal); 2195 break; 2196 } 2197 2198 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!"); 2199 2200 // Copy the input into the appropriate registers. 2201 RegsForValue InRegs = 2202 GetRegistersForValue(ConstraintCode, ConstraintVTs[i], 2203 false, true, OutputRegs, InputRegs); 2204 // FIXME: should be match fail. 
2205 assert(!InRegs.Regs.empty() && "Couldn't allocate input reg!"); 2206 2207 InRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag); 2208 2209 InRegs.AddInlineAsmOperands(1/*REGUSE*/, DAG, AsmNodeOperands); 2210 break; 2211 } 2212 case InlineAsm::isClobber: { 2213 RegsForValue ClobberedRegs = 2214 GetRegistersForValue(ConstraintCode, MVT::Other, false, false, 2215 OutputRegs, InputRegs); 2216 // Add the clobbered value to the operand list, so that the register 2217 // allocator is aware that the physreg got clobbered. 2218 if (!ClobberedRegs.Regs.empty()) 2219 ClobberedRegs.AddInlineAsmOperands(2/*REGDEF*/, DAG, AsmNodeOperands); 2220 break; 2221 } 2222 } 2223 } 2224 2225 // Finish up input operands. 2226 AsmNodeOperands[0] = Chain; 2227 if (Flag.Val) AsmNodeOperands.push_back(Flag); 2228 2229 std::vector<MVT::ValueType> VTs; 2230 VTs.push_back(MVT::Other); 2231 VTs.push_back(MVT::Flag); 2232 Chain = DAG.getNode(ISD::INLINEASM, VTs, AsmNodeOperands); 2233 Flag = Chain.getValue(1); 2234 2235 // If this asm returns a register value, copy the result from that register 2236 // and set it as the value of the call. 2237 if (!RetValRegs.Regs.empty()) 2238 setValue(&I, RetValRegs.getCopyFromRegs(DAG, Chain, Flag)); 2239 2240 std::vector<std::pair<SDOperand, Value*> > StoresToEmit; 2241 2242 // Process indirect outputs, first output all of the flagged copies out of 2243 // physregs. 2244 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) { 2245 RegsForValue &OutRegs = IndirectStoresToEmit[i].first; 2246 Value *Ptr = IndirectStoresToEmit[i].second; 2247 SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, Flag); 2248 StoresToEmit.push_back(std::make_pair(OutVal, Ptr)); 2249 } 2250 2251 // Emit the non-flagged stores from the physregs. 2252 std::vector<SDOperand> OutChains; 2253 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) 2254 OutChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain, 2255 StoresToEmit[i].first, 2256 getValue(StoresToEmit[i].second), 2257 DAG.getSrcValue(StoresToEmit[i].second))); 2258 if (!OutChains.empty()) 2259 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains); 2260 DAG.setRoot(Chain); 2261} 2262 2263 2264void SelectionDAGLowering::visitMalloc(MallocInst &I) { 2265 SDOperand Src = getValue(I.getOperand(0)); 2266 2267 MVT::ValueType IntPtr = TLI.getPointerTy(); 2268 2269 if (IntPtr < Src.getValueType()) 2270 Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src); 2271 else if (IntPtr > Src.getValueType()) 2272 Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src); 2273 2274 // Scale the source by the type size. 
2275 uint64_t ElementSize = TD->getTypeSize(I.getType()->getElementType()); 2276 Src = DAG.getNode(ISD::MUL, Src.getValueType(), 2277 Src, getIntPtrConstant(ElementSize)); 2278 2279 std::vector<std::pair<SDOperand, const Type*> > Args; 2280 Args.push_back(std::make_pair(Src, TLI.getTargetData()->getIntPtrType())); 2281 2282 std::pair<SDOperand,SDOperand> Result = 2283 TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true, 2284 DAG.getExternalSymbol("malloc", IntPtr), 2285 Args, DAG); 2286 setValue(&I, Result.first); // Pointers always fit in registers 2287 DAG.setRoot(Result.second); 2288} 2289 2290void SelectionDAGLowering::visitFree(FreeInst &I) { 2291 std::vector<std::pair<SDOperand, const Type*> > Args; 2292 Args.push_back(std::make_pair(getValue(I.getOperand(0)), 2293 TLI.getTargetData()->getIntPtrType())); 2294 MVT::ValueType IntPtr = TLI.getPointerTy(); 2295 std::pair<SDOperand,SDOperand> Result = 2296 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true, 2297 DAG.getExternalSymbol("free", IntPtr), Args, DAG); 2298 DAG.setRoot(Result.second); 2299} 2300 2301// InsertAtEndOfBasicBlock - This method should be implemented by targets that 2302// mark instructions with the 'usesCustomDAGSchedInserter' flag. These 2303// instructions are special in various ways, which require special support to 2304// insert. The specified MachineInstr is created but not inserted into any 2305// basic blocks, and the scheduler passes ownership of it to this method. 2306MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, 2307 MachineBasicBlock *MBB) { 2308 std::cerr << "If a target marks an instruction with " 2309 "'usesCustomDAGSchedInserter', it must implement " 2310 "TargetLowering::InsertAtEndOfBasicBlock!\n"; 2311 abort(); 2312 return 0; 2313} 2314 2315void SelectionDAGLowering::visitVAStart(CallInst &I) { 2316 DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(), 2317 getValue(I.getOperand(1)), 2318 DAG.getSrcValue(I.getOperand(1)))); 2319} 2320 2321void SelectionDAGLowering::visitVAArg(VAArgInst &I) { 2322 SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(), 2323 getValue(I.getOperand(0)), 2324 DAG.getSrcValue(I.getOperand(0))); 2325 setValue(&I, V); 2326 DAG.setRoot(V.getValue(1)); 2327} 2328 2329void SelectionDAGLowering::visitVAEnd(CallInst &I) { 2330 DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(), 2331 getValue(I.getOperand(1)), 2332 DAG.getSrcValue(I.getOperand(1)))); 2333} 2334 2335void SelectionDAGLowering::visitVACopy(CallInst &I) { 2336 DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(), 2337 getValue(I.getOperand(1)), 2338 getValue(I.getOperand(2)), 2339 DAG.getSrcValue(I.getOperand(1)), 2340 DAG.getSrcValue(I.getOperand(2)))); 2341} 2342 2343/// TargetLowering::LowerArguments - This is the default LowerArguments 2344/// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all 2345/// targets are migrated to using FORMAL_ARGUMENTS, this hook should be removed. 2346std::vector<SDOperand> 2347TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) { 2348 // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node. 2349 std::vector<SDOperand> Ops; 2350 Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy())); 2351 Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy())); 2352 2353 // Add one result value for each formal argument. 
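  // How many result values an argument contributes depends on its type action:
  // a Legal type adds itself, a Promote type adds the type it is promoted to,
  // and an Expand type adds one value per legal piece (or the matching packed
  // type for a legal vector), mirroring the switch below.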
2354 std::vector<MVT::ValueType> RetVals; 2355 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) { 2356 MVT::ValueType VT = getValueType(I->getType()); 2357 2358 switch (getTypeAction(VT)) { 2359 default: assert(0 && "Unknown type action!"); 2360 case Legal: 2361 RetVals.push_back(VT); 2362 break; 2363 case Promote: 2364 RetVals.push_back(getTypeToTransformTo(VT)); 2365 break; 2366 case Expand: 2367 if (VT != MVT::Vector) { 2368 // If this is a large integer, it needs to be broken up into small 2369 // integers. Figure out what the destination type is and how many small 2370 // integers it turns into. 2371 MVT::ValueType NVT = getTypeToTransformTo(VT); 2372 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT); 2373 for (unsigned i = 0; i != NumVals; ++i) 2374 RetVals.push_back(NVT); 2375 } else { 2376 // Otherwise, this is a vector type. We only support legal vectors 2377 // right now. 2378 unsigned NumElems = cast<PackedType>(I->getType())->getNumElements(); 2379 const Type *EltTy = cast<PackedType>(I->getType())->getElementType(); 2380 2381 // Figure out if there is a Packed type corresponding to this Vector 2382 // type. If so, convert to the packed type. 2383 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 2384 if (TVT != MVT::Other && isTypeLegal(TVT)) { 2385 RetVals.push_back(TVT); 2386 } else { 2387 assert(0 && "Don't support illegal by-val vector arguments yet!"); 2388 } 2389 } 2390 break; 2391 } 2392 } 2393 2394 if (RetVals.size() == 0) 2395 RetVals.push_back(MVT::isVoid); 2396 2397 // Create the node. 2398 SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS, RetVals, Ops).Val; 2399 2400 // Set up the return result vector. 2401 Ops.clear(); 2402 unsigned i = 0; 2403 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) { 2404 MVT::ValueType VT = getValueType(I->getType()); 2405 2406 switch (getTypeAction(VT)) { 2407 default: assert(0 && "Unknown type action!"); 2408 case Legal: 2409 Ops.push_back(SDOperand(Result, i++)); 2410 break; 2411 case Promote: { 2412 SDOperand Op(Result, i++); 2413 if (MVT::isInteger(VT)) { 2414 unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext 2415 : ISD::AssertZext; 2416 Op = DAG.getNode(AssertOp, Op.getValueType(), Op, DAG.getValueType(VT)); 2417 Op = DAG.getNode(ISD::TRUNCATE, VT, Op); 2418 } else { 2419 assert(MVT::isFloatingPoint(VT) && "Not int or FP?"); 2420 Op = DAG.getNode(ISD::FP_ROUND, VT, Op); 2421 } 2422 Ops.push_back(Op); 2423 break; 2424 } 2425 case Expand: 2426 if (VT != MVT::Vector) { 2427 // If this is a large integer, it needs to be reassembled from small 2428 // integers. Figure out what the source elt type is and how many small 2429 // integers it is. 2430 MVT::ValueType NVT = getTypeToTransformTo(VT); 2431 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT); 2432 if (NumVals == 2) { 2433 SDOperand Lo = SDOperand(Result, i++); 2434 SDOperand Hi = SDOperand(Result, i++); 2435 2436 if (!isLittleEndian()) 2437 std::swap(Lo, Hi); 2438 2439 Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi)); 2440 } else { 2441 // Value scalarized into many values. Unimp for now. 2442 assert(0 && "Cannot expand i64 -> i16 yet!"); 2443 } 2444 } else { 2445 // Otherwise, this is a vector type. We only support legal vectors 2446 // right now. 
2447 const PackedType *PTy = cast<PackedType>(I->getType()); 2448 unsigned NumElems = PTy->getNumElements(); 2449 const Type *EltTy = PTy->getElementType(); 2450 2451 // Figure out if there is a Packed type corresponding to this Vector 2452 // type. If so, convert to the packed type. 2453 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems); 2454 if (TVT != MVT::Other && isTypeLegal(TVT)) { 2455 SDOperand N = SDOperand(Result, i++); 2456 // Handle copies from generic vectors to registers. 2457 MVT::ValueType PTyElementVT, PTyLegalElementVT; 2458 unsigned NE = getPackedTypeBreakdown(PTy, PTyElementVT, 2459 PTyLegalElementVT); 2460 // Insert a VBIT_CONVERT of the FORMAL_ARGUMENTS to a 2461 // "N x PTyElementVT" MVT::Vector type. 2462 N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N, 2463 DAG.getConstant(NE, MVT::i32), 2464 DAG.getValueType(PTyElementVT)); 2465 Ops.push_back(N); 2466 } else { 2467 assert(0 && "Don't support illegal by-val vector arguments yet!"); 2468 } 2469 } 2470 break; 2471 } 2472 } 2473 return Ops; 2474} 2475 2476// It is always conservatively correct for llvm.returnaddress and 2477// llvm.frameaddress to return 0. 2478std::pair<SDOperand, SDOperand> 2479TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, 2480 unsigned Depth, SelectionDAG &DAG) { 2481 return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain); 2482} 2483 2484SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 2485 assert(0 && "LowerOperation not implemented for this target!"); 2486 abort(); 2487 return SDOperand(); 2488} 2489 2490SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op, 2491 SelectionDAG &DAG) { 2492 assert(0 && "CustomPromoteOperation not implemented for this target!"); 2493 abort(); 2494 return SDOperand(); 2495} 2496 2497void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) { 2498 unsigned Depth = (unsigned)cast<ConstantUInt>(I.getOperand(1))->getValue(); 2499 std::pair<SDOperand,SDOperand> Result = 2500 TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG); 2501 setValue(&I, Result.first); 2502 DAG.setRoot(Result.second); 2503} 2504 2505/// getMemsetValue - Vectorized representation of the memset value 2506/// operand. 2507static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT, 2508 SelectionDAG &DAG) { 2509 MVT::ValueType CurVT = VT; 2510 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 2511 uint64_t Val = C->getValue() & 255; 2512 unsigned Shift = 8; 2513 while (CurVT != MVT::i8) { 2514 Val = (Val << Shift) | Val; 2515 Shift <<= 1; 2516 CurVT = (MVT::ValueType)((unsigned)CurVT - 1); 2517 } 2518 return DAG.getConstant(Val, VT); 2519 } else { 2520 Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value); 2521 unsigned Shift = 8; 2522 while (CurVT != MVT::i8) { 2523 Value = 2524 DAG.getNode(ISD::OR, VT, 2525 DAG.getNode(ISD::SHL, VT, Value, 2526 DAG.getConstant(Shift, MVT::i8)), Value); 2527 Shift <<= 1; 2528 CurVT = (MVT::ValueType)((unsigned)CurVT - 1); 2529 } 2530 2531 return Value; 2532 } 2533} 2534 2535/// getMemsetStringVal - Similar to getMemsetValue. Except this is only 2536/// used when a memcpy is turned into a memset when the source is a constant 2537/// string ptr. 
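/// It packs the next getSizeInBits(VT)/8 bytes of the string into one integer
/// constant, walking the string backwards on little-endian targets so each
/// byte ends up in the right position.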
2538static SDOperand getMemsetStringVal(MVT::ValueType VT, 2539 SelectionDAG &DAG, TargetLowering &TLI, 2540 std::string &Str, unsigned Offset) { 2541 MVT::ValueType CurVT = VT; 2542 uint64_t Val = 0; 2543 unsigned MSB = getSizeInBits(VT) / 8; 2544 if (TLI.isLittleEndian()) 2545 Offset = Offset + MSB - 1; 2546 for (unsigned i = 0; i != MSB; ++i) { 2547 Val = (Val << 8) | Str[Offset]; 2548 Offset += TLI.isLittleEndian() ? -1 : 1; 2549 } 2550 return DAG.getConstant(Val, VT); 2551} 2552 2553/// getMemBasePlusOffset - Returns base and offset node for the 2554static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset, 2555 SelectionDAG &DAG, TargetLowering &TLI) { 2556 MVT::ValueType VT = Base.getValueType(); 2557 return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT)); 2558} 2559 2560/// MeetsMaxMemopRequirement - Determines if the number of memory ops required 2561/// to replace the memset / memcpy is below the threshold. It also returns the 2562/// types of the sequence of memory ops to perform memset / memcpy. 2563static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps, 2564 unsigned Limit, uint64_t Size, 2565 unsigned Align, TargetLowering &TLI) { 2566 MVT::ValueType VT; 2567 2568 if (TLI.allowsUnalignedMemoryAccesses()) { 2569 VT = MVT::i64; 2570 } else { 2571 switch (Align & 7) { 2572 case 0: 2573 VT = MVT::i64; 2574 break; 2575 case 4: 2576 VT = MVT::i32; 2577 break; 2578 case 2: 2579 VT = MVT::i16; 2580 break; 2581 default: 2582 VT = MVT::i8; 2583 break; 2584 } 2585 } 2586 2587 MVT::ValueType LVT = MVT::i64; 2588 while (!TLI.isTypeLegal(LVT)) 2589 LVT = (MVT::ValueType)((unsigned)LVT - 1); 2590 assert(MVT::isInteger(LVT)); 2591 2592 if (VT > LVT) 2593 VT = LVT; 2594 2595 unsigned NumMemOps = 0; 2596 while (Size != 0) { 2597 unsigned VTSize = getSizeInBits(VT) / 8; 2598 while (VTSize > Size) { 2599 VT = (MVT::ValueType)((unsigned)VT - 1); 2600 VTSize >>= 1; 2601 } 2602 assert(MVT::isInteger(VT)); 2603 2604 if (++NumMemOps > Limit) 2605 return false; 2606 MemOps.push_back(VT); 2607 Size -= VTSize; 2608 } 2609 2610 return true; 2611} 2612 2613void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) { 2614 SDOperand Op1 = getValue(I.getOperand(1)); 2615 SDOperand Op2 = getValue(I.getOperand(2)); 2616 SDOperand Op3 = getValue(I.getOperand(3)); 2617 SDOperand Op4 = getValue(I.getOperand(4)); 2618 unsigned Align = (unsigned)cast<ConstantSDNode>(Op4)->getValue(); 2619 if (Align == 0) Align = 1; 2620 2621 if (ConstantSDNode *Size = dyn_cast<ConstantSDNode>(Op3)) { 2622 std::vector<MVT::ValueType> MemOps; 2623 2624 // Expand memset / memcpy to a series of load / store ops 2625 // if the size operand falls below a certain threshold. 2626 std::vector<SDOperand> OutChains; 2627 switch (Op) { 2628 default: break; // Do nothing for now. 
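    // When the size is a known constant and MeetsMaxMemopRequirement accepts it
    // under the target's MaxStoresPerMemset / MaxStoresPerMemcpy limit, the
    // call is expanded right here into that sequence of stores (loads + stores
    // for memcpy); otherwise OutChains stays empty and a generic MEMSET /
    // MEMCPY / MEMMOVE node is emitted at the bottom of the function.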
2629 case ISD::MEMSET: { 2630 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(), 2631 Size->getValue(), Align, TLI)) { 2632 unsigned NumMemOps = MemOps.size(); 2633 unsigned Offset = 0; 2634 for (unsigned i = 0; i < NumMemOps; i++) { 2635 MVT::ValueType VT = MemOps[i]; 2636 unsigned VTSize = getSizeInBits(VT) / 8; 2637 SDOperand Value = getMemsetValue(Op2, VT, DAG); 2638 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, getRoot(), 2639 Value, 2640 getMemBasePlusOffset(Op1, Offset, DAG, TLI), 2641 DAG.getSrcValue(I.getOperand(1), Offset)); 2642 OutChains.push_back(Store); 2643 Offset += VTSize; 2644 } 2645 } 2646 break; 2647 } 2648 case ISD::MEMCPY: { 2649 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemcpy(), 2650 Size->getValue(), Align, TLI)) { 2651 unsigned NumMemOps = MemOps.size(); 2652 unsigned SrcOff = 0, DstOff = 0, SrcDelta = 0; 2653 GlobalAddressSDNode *G = NULL; 2654 std::string Str; 2655 bool CopyFromStr = false; 2656 2657 if (Op2.getOpcode() == ISD::GlobalAddress) 2658 G = cast<GlobalAddressSDNode>(Op2); 2659 else if (Op2.getOpcode() == ISD::ADD && 2660 Op2.getOperand(0).getOpcode() == ISD::GlobalAddress && 2661 Op2.getOperand(1).getOpcode() == ISD::Constant) { 2662 G = cast<GlobalAddressSDNode>(Op2.getOperand(0)); 2663 SrcDelta = cast<ConstantSDNode>(Op2.getOperand(1))->getValue(); 2664 } 2665 if (G) { 2666 GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal()); 2667 if (GV) { 2668 Str = GV->getStringValue(false); 2669 if (!Str.empty()) { 2670 CopyFromStr = true; 2671 SrcOff += SrcDelta; 2672 } 2673 } 2674 } 2675 2676 for (unsigned i = 0; i < NumMemOps; i++) { 2677 MVT::ValueType VT = MemOps[i]; 2678 unsigned VTSize = getSizeInBits(VT) / 8; 2679 SDOperand Value, Chain, Store; 2680 2681 if (CopyFromStr) { 2682 Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff); 2683 Chain = getRoot(); 2684 Store = 2685 DAG.getNode(ISD::STORE, MVT::Other, Chain, Value, 2686 getMemBasePlusOffset(Op1, DstOff, DAG, TLI), 2687 DAG.getSrcValue(I.getOperand(1), DstOff)); 2688 } else { 2689 Value = DAG.getLoad(VT, getRoot(), 2690 getMemBasePlusOffset(Op2, SrcOff, DAG, TLI), 2691 DAG.getSrcValue(I.getOperand(2), SrcOff)); 2692 Chain = Value.getValue(1); 2693 Store = 2694 DAG.getNode(ISD::STORE, MVT::Other, Chain, Value, 2695 getMemBasePlusOffset(Op1, DstOff, DAG, TLI), 2696 DAG.getSrcValue(I.getOperand(1), DstOff)); 2697 } 2698 OutChains.push_back(Store); 2699 SrcOff += VTSize; 2700 DstOff += VTSize; 2701 } 2702 } 2703 break; 2704 } 2705 } 2706 2707 if (!OutChains.empty()) { 2708 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains)); 2709 return; 2710 } 2711 } 2712 2713 std::vector<SDOperand> Ops; 2714 Ops.push_back(getRoot()); 2715 Ops.push_back(Op1); 2716 Ops.push_back(Op2); 2717 Ops.push_back(Op3); 2718 Ops.push_back(Op4); 2719 DAG.setRoot(DAG.getNode(Op, MVT::Other, Ops)); 2720} 2721 2722//===----------------------------------------------------------------------===// 2723// SelectionDAGISel code 2724//===----------------------------------------------------------------------===// 2725 2726unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) { 2727 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT)); 2728} 2729 2730void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const { 2731 // FIXME: we only modify the CFG to split critical edges. This 2732 // updates dom and loop info. 2733} 2734 2735 2736/// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset, 2737/// casting to the type of GEPI. 
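/// The computed value is cached in V, so each block materializes the address
/// at most once no matter how many uses it contains.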
2738 static Value *InsertGEPComputeCode(Value *&V, BasicBlock *BB, Instruction *GEPI,
2739                                    Value *Ptr, Value *PtrOffset) {
2740   if (V) return V;   // Already computed.
2741
2742   BasicBlock::iterator InsertPt;
2743   if (BB == GEPI->getParent()) {
2744     // If inserting into the GEP's block, insert right after the GEP.
2745     InsertPt = GEPI;
2746     ++InsertPt;
2747   } else {
2748     // Otherwise, insert at the top of BB, after any PHI nodes.
2749     InsertPt = BB->begin();
2750     while (isa<PHINode>(InsertPt)) ++InsertPt;
2751   }
2752
2753   // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into
2754   // BB so that there is only one value live across basic blocks (the cast
2755   // operand).
2756   if (CastInst *CI = dyn_cast<CastInst>(Ptr))
2757     if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
2758       Ptr = new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
2759
2760   // Add the offset, cast it to the right type.
2761   Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
2762   Ptr = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
2763   return V = Ptr;
2764 }
2765
2766 /// OptimizeNoopCopyExpression - We have determined that the specified cast
2767 /// instruction is a noop copy (e.g. it's casting from one pointer type to
2768 /// another, int->uint, or int->sbyte on PPC).
2769 static void OptimizeNoopCopyExpression(CastInst *CI) {
2770   BasicBlock *DefBB = CI->getParent();
2771
2772   /// InsertedCasts - Only insert a cast in each block once.
2773   std::map<BasicBlock*, CastInst*> InsertedCasts;
2774
2775   for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
2776        UI != E; ) {
2777     Use &TheUse = UI.getUse();
2778     Instruction *User = cast<Instruction>(*UI);
2779
2780     // Figure out which BB this cast is used in.  For PHI's this is the
2781     // appropriate predecessor block.
2782     BasicBlock *UserBB = User->getParent();
2783     if (PHINode *PN = dyn_cast<PHINode>(User)) {
2784       unsigned OpVal = UI.getOperandNo()/2;
2785       UserBB = PN->getIncomingBlock(OpVal);
2786     }
2787
2788     // Preincrement use iterator so we don't invalidate it.
2789     ++UI;
2790
2791     // If this user is in the same block as the cast, don't change the cast.
2792     if (UserBB == DefBB) continue;
2793
2794     // If we have already inserted a cast into this block, use it.
2795     CastInst *&InsertedCast = InsertedCasts[UserBB];
2796
2797     if (!InsertedCast) {
2798       BasicBlock::iterator InsertPt = UserBB->begin();
2799       while (isa<PHINode>(InsertPt)) ++InsertPt;
2800
2801       InsertedCast =
2802         new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
2803     }
2804
2805     // Replace a use of the cast with a use of the new cast.
2806     TheUse = InsertedCast;
2807   }
2808 }
2809
2810 /// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction
2811 /// selection, we want to be a bit careful about some things.  In particular, if
2812 /// we have a GEP instruction that is used in a different block than it is
2813 /// defined, the addressing expression of the GEP cannot be folded into loads or
2814 /// stores that use it.  In this case, decompose the GEP and move constant
2815 /// indices into blocks that use it.
2816 static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
2817                                   const TargetData *TD) {
2818   // If this GEP is only used inside the block it is defined in, there is no
2819   // need to rewrite it.
2820 bool isUsedOutsideDefBB = false; 2821 BasicBlock *DefBB = GEPI->getParent(); 2822 for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end(); 2823 UI != E; ++UI) { 2824 if (cast<Instruction>(*UI)->getParent() != DefBB) { 2825 isUsedOutsideDefBB = true; 2826 break; 2827 } 2828 } 2829 if (!isUsedOutsideDefBB) return; 2830 2831 // If this GEP has no non-zero constant indices, there is nothing we can do, 2832 // ignore it. 2833 bool hasConstantIndex = false; 2834 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1, 2835 E = GEPI->op_end(); OI != E; ++OI) { 2836 if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI)) 2837 if (CI->getRawValue()) { 2838 hasConstantIndex = true; 2839 break; 2840 } 2841 } 2842 // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses. 2843 if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0))) return; 2844 2845 // Otherwise, decompose the GEP instruction into multiplies and adds. Sum the 2846 // constant offset (which we now know is non-zero) and deal with it later. 2847 uint64_t ConstantOffset = 0; 2848 const Type *UIntPtrTy = TD->getIntPtrType(); 2849 Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI); 2850 const Type *Ty = GEPI->getOperand(0)->getType(); 2851 2852 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1, 2853 E = GEPI->op_end(); OI != E; ++OI) { 2854 Value *Idx = *OI; 2855 if (const StructType *StTy = dyn_cast<StructType>(Ty)) { 2856 unsigned Field = cast<ConstantUInt>(Idx)->getValue(); 2857 if (Field) 2858 ConstantOffset += TD->getStructLayout(StTy)->MemberOffsets[Field]; 2859 Ty = StTy->getElementType(Field); 2860 } else { 2861 Ty = cast<SequentialType>(Ty)->getElementType(); 2862 2863 // Handle constant subscripts. 2864 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) { 2865 if (CI->getRawValue() == 0) continue; 2866 2867 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI)) 2868 ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CSI->getValue(); 2869 else 2870 ConstantOffset+=TD->getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue(); 2871 continue; 2872 } 2873 2874 // Ptr = Ptr + Idx * ElementSize; 2875 2876 // Cast Idx to UIntPtrTy if needed. 2877 Idx = new CastInst(Idx, UIntPtrTy, "", GEPI); 2878 2879 uint64_t ElementSize = TD->getTypeSize(Ty); 2880 // Mask off bits that should not be set. 2881 ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits()); 2882 Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize); 2883 2884 // Multiply by the element size and add to the base. 2885 Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI); 2886 Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI); 2887 } 2888 } 2889 2890 // Make sure that the offset fits in uintptr_t. 2891 ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits()); 2892 Constant *PtrOffset = ConstantUInt::get(UIntPtrTy, ConstantOffset); 2893 2894 // Okay, we have now emitted all of the variable index parts to the BB that 2895 // the GEP is defined in. Loop over all of the using instructions, inserting 2896 // an "add Ptr, ConstantOffset" into each block that uses it and update the 2897 // instruction to use the newly computed value, making GEPI dead. When the 2898 // user is a load or store instruction address, we emit the add into the user 2899 // block, otherwise we use a canonical version right next to the gep (these 2900 // won't be foldable as addresses, so we might as well share the computation). 
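  // Illustrative example (member offsets are target-dependent): for
  //   %p = getelementptr %struct.T* %base, int 0, uint 2, int %i
  // the code above leaves "Ptr = add (cast %base), mul (cast %i), sizeof(elt)"
  // in the defining block, and each using block then gets its own
  // "add Ptr, <constant offset>" cast back to the GEP's pointer type.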
2901
2902   std::map<BasicBlock*,Value*> InsertedExprs;
2903   while (!GEPI->use_empty()) {
2904     Instruction *User = cast<Instruction>(GEPI->use_back());
2905
2906     // If this use is not foldable into the addressing mode, use a version
2907     // emitted in the GEP block.
2908     Value *NewVal;
2909     if (!isa<LoadInst>(User) &&
2910         (!isa<StoreInst>(User) || User->getOperand(0) == GEPI)) {
2911       NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
2912                                     Ptr, PtrOffset);
2913     } else {
2914       // Otherwise, insert the code in the User's block so it can be folded into
2915       // any users in that block.
2916       NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
2917                                     User->getParent(), GEPI,
2918                                     Ptr, PtrOffset);
2919     }
2920     User->replaceUsesOfWith(GEPI, NewVal);
2921   }
2922
2923   // Finally, the GEP is dead, remove it.
2924   GEPI->eraseFromParent();
2925 }
2926
2927 bool SelectionDAGISel::runOnFunction(Function &Fn) {
2928   MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
2929   RegMap = MF.getSSARegMap();
2930   DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");
2931
2932   // First, split all critical edges for PHI nodes with incoming values that are
2933   // constants; this way the load of the constant into a vreg will not be placed
2934   // into MBBs that are used some other way.
2935   //
2936   // In this pass we also look for GEP and cast instructions that are used
2937   // across basic blocks and rewrite them to improve basic-block-at-a-time
2938   // selection.
2939   //
2940   //
2941   for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
2942     PHINode *PN;
2943     BasicBlock::iterator BBI;
2944     for (BBI = BB->begin(); (PN = dyn_cast<PHINode>(BBI)); ++BBI)
2945       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
2946         if (isa<Constant>(PN->getIncomingValue(i)))
2947           SplitCriticalEdge(PN->getIncomingBlock(i), BB);
2948
2949     for (BasicBlock::iterator E = BB->end(); BBI != E; ) {
2950       Instruction *I = BBI++;
2951       if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
2952         OptimizeGEPExpression(GEPI, TLI.getTargetData());
2953       } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
2954         // If this is a noop copy, sink it into user blocks to reduce the number
2955         // of virtual registers that must be created and coalesced.
2956         MVT::ValueType SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
2957         MVT::ValueType DstVT = TLI.getValueType(CI->getType());
2958
2959         // Is this an fp<->int conversion?
2960         if (MVT::isInteger(SrcVT) != MVT::isInteger(DstVT))
2961           continue;
2962
2963         // If this is an extension, it will be a zero or sign extension, which
2964         // isn't a noop.
2965         if (SrcVT < DstVT) continue;
2966
2967         // If these values will be promoted, find out what they will be promoted
2968         // to.  This helps us consider truncates on PPC as noop copies when they
2969         // are.
2970         if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
2971           SrcVT = TLI.getTypeToTransformTo(SrcVT);
2972         if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
2973           DstVT = TLI.getTypeToTransformTo(DstVT);
2974
2975         // If, after promotion, these are the same types, this is a noop copy.
2976 if (SrcVT == DstVT) 2977 OptimizeNoopCopyExpression(CI); 2978 } 2979 } 2980 } 2981 2982 FunctionLoweringInfo FuncInfo(TLI, Fn, MF); 2983 2984 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) 2985 SelectBasicBlock(I, MF, FuncInfo); 2986 2987 return true; 2988} 2989 2990 2991SDOperand SelectionDAGISel:: 2992CopyValueToVirtualRegister(SelectionDAGLowering &SDL, Value *V, unsigned Reg) { 2993 SDOperand Op = SDL.getValue(V); 2994 assert((Op.getOpcode() != ISD::CopyFromReg || 2995 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) && 2996 "Copy from a reg to the same reg!"); 2997 2998 // If this type is not legal, we must make sure to not create an invalid 2999 // register use. 3000 MVT::ValueType SrcVT = Op.getValueType(); 3001 MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT); 3002 SelectionDAG &DAG = SDL.DAG; 3003 if (SrcVT == DestVT) { 3004 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op); 3005 } else if (SrcVT == MVT::Vector) { 3006 // Handle copies from generic vectors to registers. 3007 MVT::ValueType PTyElementVT, PTyLegalElementVT; 3008 unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(V->getType()), 3009 PTyElementVT, PTyLegalElementVT); 3010 3011 // Insert a VBIT_CONVERT of the input vector to a "N x PTyElementVT" 3012 // MVT::Vector type. 3013 Op = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Op, 3014 DAG.getConstant(NE, MVT::i32), 3015 DAG.getValueType(PTyElementVT)); 3016 3017 // Loop over all of the elements of the resultant vector, 3018 // VEXTRACT_VECTOR_ELT'ing them, converting them to PTyLegalElementVT, then 3019 // copying them into output registers. 3020 std::vector<SDOperand> OutChains; 3021 SDOperand Root = SDL.getRoot(); 3022 for (unsigned i = 0; i != NE; ++i) { 3023 SDOperand Elt = DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, PTyElementVT, 3024 Op, DAG.getConstant(i, MVT::i32)); 3025 if (PTyElementVT == PTyLegalElementVT) { 3026 // Elements are legal. 3027 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt)); 3028 } else if (PTyLegalElementVT > PTyElementVT) { 3029 // Elements are promoted. 3030 if (MVT::isFloatingPoint(PTyLegalElementVT)) 3031 Elt = DAG.getNode(ISD::FP_EXTEND, PTyLegalElementVT, Elt); 3032 else 3033 Elt = DAG.getNode(ISD::ANY_EXTEND, PTyLegalElementVT, Elt); 3034 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt)); 3035 } else { 3036 // Elements are expanded. 3037 // The src value is expanded into multiple registers. 3038 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT, 3039 Elt, DAG.getConstant(0, MVT::i32)); 3040 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT, 3041 Elt, DAG.getConstant(1, MVT::i32)); 3042 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Lo)); 3043 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Hi)); 3044 } 3045 } 3046 return DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains); 3047 } else if (SrcVT < DestVT) { 3048 // The src value is promoted to the register. 3049 if (MVT::isFloatingPoint(SrcVT)) 3050 Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op); 3051 else 3052 Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op); 3053 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op); 3054 } else { 3055 // The src value is expanded into multiple registers. 
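    // For example, an i64 value on a target with only 32-bit registers:
    // EXTRACT_ELEMENT 0/1 peel off the low and high halves, which are then
    // copied into the consecutive virtual registers Reg and Reg+1.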
3056 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
3057 Op, DAG.getConstant(0, MVT::i32));
3058 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
3059 Op, DAG.getConstant(1, MVT::i32));
3060 Op = DAG.getCopyToReg(SDL.getRoot(), Reg, Lo);
3061 return DAG.getCopyToReg(Op, Reg+1, Hi);
3062 }
3063}
3064
3065void SelectionDAGISel::
3066LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL,
3067 std::vector<SDOperand> &UnorderedChains) {
3068 // If this is the entry block, emit arguments.
3069 Function &F = *BB->getParent();
3070 FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
3071 SDOperand OldRoot = SDL.DAG.getRoot();
3072 std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);
3073
3074 unsigned a = 0;
3075 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
3076 AI != E; ++AI, ++a)
3077 if (!AI->use_empty()) {
3078 SDL.setValue(AI, Args[a]);
3079
3080 // If this argument is live outside of the entry block, insert a copy from
3081 // wherever we got it to the vreg that other BBs will reference it as.
3082 if (FuncInfo.ValueMap.count(AI)) {
3083 SDOperand Copy =
3084 CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
3085 UnorderedChains.push_back(Copy);
3086 }
3087 }
3088
3089 // Next, if the function has live ins that need to be copied into vregs,
3090 // emit the copies now, into the top of the block.
3091 MachineFunction &MF = SDL.DAG.getMachineFunction();
3092 if (MF.livein_begin() != MF.livein_end()) {
3093 SSARegMap *RegMap = MF.getSSARegMap();
3094 const MRegisterInfo &MRI = *MF.getTarget().getRegisterInfo();
3095 for (MachineFunction::livein_iterator LI = MF.livein_begin(),
3096 E = MF.livein_end(); LI != E; ++LI)
3097 if (LI->second)
3098 MRI.copyRegToReg(*MF.begin(), MF.begin()->end(), LI->second,
3099 LI->first, RegMap->getRegClass(LI->second));
3100 }
3101
3102 // Finally, if the target has anything special to do, allow it to do so.
3103 EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction());
3104}
3105
3106
3107void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
3108 std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
3109 FunctionLoweringInfo &FuncInfo) {
3110 SelectionDAGLowering SDL(DAG, TLI, FuncInfo);
3111
3112 std::vector<SDOperand> UnorderedChains;
3113
3114 // Lower any arguments needed in this block if this is the entry block.
3115 if (LLVMBB == &LLVMBB->getParent()->front())
3116 LowerArguments(LLVMBB, SDL, UnorderedChains);
3117
3118 BB = FuncInfo.MBBMap[LLVMBB];
3119 SDL.setCurrentBasicBlock(BB);
3120
3121 // Lower all of the non-terminator instructions.
3122 for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
3123 I != E; ++I)
3124 SDL.visit(*I);
3125
3126 // Ensure that all instructions which are used outside of their defining
3127 // blocks are available as virtual registers.
3128 for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E; ++I)
3129 if (!I->use_empty() && !isa<PHINode>(I)) {
3130 std::map<const Value*, unsigned>::iterator VMI = FuncInfo.ValueMap.find(I);
3131 if (VMI != FuncInfo.ValueMap.end())
3132 UnorderedChains.push_back(
3133 CopyValueToVirtualRegister(SDL, I, VMI->second));
3134 }
3135
3136 // Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
3137 // ensure constants are generated when needed. Remember the virtual registers
3138 // that need to be added to the Machine PHI nodes as input. We cannot just
3139 // directly add them, because expansion might result in multiple MBBs for one
3140 // BB. As such, the start of the BB might correspond to a different MBB than
3141 // the end.
3142 //
3143
3144 // Emit constants only once even if used by multiple PHI nodes.
3145 std::map<Constant*, unsigned> ConstantsOut;
3146
3147 // Check successor nodes' PHI nodes that expect a constant to be available from
3148 // this block.
3149 TerminatorInst *TI = LLVMBB->getTerminator();
3150 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
3151 BasicBlock *SuccBB = TI->getSuccessor(succ);
3152 MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin();
3153 PHINode *PN;
3154
3155 // At this point we know that there is a 1-1 correspondence between LLVM PHI
3156 // nodes and Machine PHI nodes, but the incoming operands have not been
3157 // emitted yet.
3158 for (BasicBlock::iterator I = SuccBB->begin();
3159 (PN = dyn_cast<PHINode>(I)); ++I)
3160 if (!PN->use_empty()) {
3161 unsigned Reg;
3162 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
3163 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
3164 unsigned &RegOut = ConstantsOut[C];
3165 if (RegOut == 0) {
3166 RegOut = FuncInfo.CreateRegForValue(C);
3167 UnorderedChains.push_back(
3168 CopyValueToVirtualRegister(SDL, C, RegOut));
3169 }
3170 Reg = RegOut;
3171 } else {
3172 Reg = FuncInfo.ValueMap[PHIOp];
3173 if (Reg == 0) {
3174 assert(isa<AllocaInst>(PHIOp) &&
3175 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
3176 "Didn't codegen value into a register!??");
3177 Reg = FuncInfo.CreateRegForValue(PHIOp);
3178 UnorderedChains.push_back(
3179 CopyValueToVirtualRegister(SDL, PHIOp, Reg));
3180 }
3181 }
3182
3183 // Remember that this register needs to be added to the machine PHI node as
3184 // the input for this MBB.
3185 MVT::ValueType VT = TLI.getValueType(PN->getType());
3186 unsigned NumElements;
3187 if (VT != MVT::Vector)
3188 NumElements = TLI.getNumElements(VT);
3189 else {
3190 MVT::ValueType VT1,VT2;
3191 NumElements =
3192 TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
3193 VT1, VT2);
3194 }
3195 for (unsigned i = 0, e = NumElements; i != e; ++i)
3196 PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
3197 }
3198 }
3199 ConstantsOut.clear();
3200
3201 // Turn all of the unordered chains into one factored node.
3202 if (!UnorderedChains.empty()) {
3203 SDOperand Root = SDL.getRoot();
3204 if (Root.getOpcode() != ISD::EntryToken) {
3205 unsigned i = 0, e = UnorderedChains.size();
3206 for (; i != e; ++i) {
3207 assert(UnorderedChains[i].Val->getNumOperands() > 1);
3208 if (UnorderedChains[i].Val->getOperand(0) == Root)
3209 break; // Don't add the root if we already indirectly depend on it.
3210 }
3211
3212 if (i == e)
3213 UnorderedChains.push_back(Root);
3214 }
3215 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, UnorderedChains));
3216 }
3217
3218 // Lower the terminator after the copies are emitted.
3219 SDL.visit(*LLVMBB->getTerminator());
3220
3221 // Copy over any CaseBlock records that may now exist due to SwitchInst
3222 // lowering, as well as any jump table information.
3223 SwitchCases.clear();
3224 SwitchCases = SDL.SwitchCases;
3225 JT = SDL.JT;
3226
3227 // Make sure the root of the DAG is up-to-date.
3228 DAG.setRoot(SDL.getRoot());
3229}
3230
3231void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) {
3232 // Run the DAG combiner in pre-legalize mode.
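// The boolean passed to Combine() indicates whether legalization has already
// run: false for this pre-legalize pass, true for the post-legalize pass below.
// Roughly, each block's DAG goes through:
//   combine -> legalize -> combine -> instruction selection (which then
//   schedules and emits the machine code).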
3233 DAG.Combine(false); 3234 3235 DEBUG(std::cerr << "Lowered selection DAG:\n"); 3236 DEBUG(DAG.dump()); 3237 3238 // Second step, hack on the DAG until it only uses operations and types that 3239 // the target supports. 3240 DAG.Legalize(); 3241 3242 DEBUG(std::cerr << "Legalized selection DAG:\n"); 3243 DEBUG(DAG.dump()); 3244 3245 // Run the DAG combiner in post-legalize mode. 3246 DAG.Combine(true); 3247 3248 if (ViewISelDAGs) DAG.viewGraph(); 3249 3250 // Third, instruction select all of the operations to machine code, adding the 3251 // code to the MachineBasicBlock. 3252 InstructionSelectBasicBlock(DAG); 3253 3254 DEBUG(std::cerr << "Selected machine code:\n"); 3255 DEBUG(BB->dump()); 3256} 3257 3258void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF, 3259 FunctionLoweringInfo &FuncInfo) { 3260 std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate; 3261 { 3262 SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>()); 3263 CurDAG = &DAG; 3264 3265 // First step, lower LLVM code to some DAG. This DAG may use operations and 3266 // types that are not supported by the target. 3267 BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo); 3268 3269 // Second step, emit the lowered DAG as machine code. 3270 CodeGenAndEmitDAG(DAG); 3271 } 3272 3273 // Next, now that we know what the last MBB the LLVM BB expanded is, update 3274 // PHI nodes in successors. 3275 if (SwitchCases.empty() && JT.Reg == 0) { 3276 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) { 3277 MachineInstr *PHI = PHINodesToUpdate[i].first; 3278 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 3279 "This is not a machine PHI node that we are updating!"); 3280 PHI->addRegOperand(PHINodesToUpdate[i].second); 3281 PHI->addMachineBasicBlockOperand(BB); 3282 } 3283 return; 3284 } 3285 3286 // If the JumpTable record is filled in, then we need to emit a jump table. 3287 // Updating the PHI nodes is tricky in this case, since we need to determine 3288 // whether the PHI is a successor of the range check MBB or the jump table MBB 3289 if (JT.Reg) { 3290 assert(SwitchCases.empty() && "Cannot have jump table and lowered switch"); 3291 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>()); 3292 CurDAG = &SDAG; 3293 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo); 3294 MachineBasicBlock *RangeBB = BB; 3295 // Set the current basic block to the mbb we wish to insert the code into 3296 BB = JT.MBB; 3297 SDL.setCurrentBasicBlock(BB); 3298 // Emit the code 3299 SDL.visitJumpTable(JT); 3300 SDAG.setRoot(SDL.getRoot()); 3301 CodeGenAndEmitDAG(SDAG); 3302 // Update PHI Nodes 3303 for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) { 3304 MachineInstr *PHI = PHINodesToUpdate[pi].first; 3305 MachineBasicBlock *PHIBB = PHI->getParent(); 3306 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 3307 "This is not a machine PHI node that we are updating!"); 3308 if (PHIBB == JT.Default) { 3309 PHI->addRegOperand(PHINodesToUpdate[pi].second); 3310 PHI->addMachineBasicBlockOperand(RangeBB); 3311 } 3312 if (BB->succ_end() != std::find(BB->succ_begin(),BB->succ_end(), PHIBB)) { 3313 PHI->addRegOperand(PHINodesToUpdate[pi].second); 3314 PHI->addMachineBasicBlockOperand(BB); 3315 } 3316 } 3317 return; 3318 } 3319 3320 // If we generated any switch lowering information, build and codegen any 3321 // additional DAGs necessary. 
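// Each SwitchCases entry describes one compare-and-branch block created when a
// SwitchInst was lowered into a sequence of comparisons. Each such block is
// codegen'd with its own DAG below, and PHI nodes in its successors are updated
// as if the edge still came from the original pre-expansion block.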
3322 for(unsigned i = 0, e = SwitchCases.size(); i != e; ++i) { 3323 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>()); 3324 CurDAG = &SDAG; 3325 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo); 3326 // Set the current basic block to the mbb we wish to insert the code into 3327 BB = SwitchCases[i].ThisBB; 3328 SDL.setCurrentBasicBlock(BB); 3329 // Emit the code 3330 SDL.visitSwitchCase(SwitchCases[i]); 3331 SDAG.setRoot(SDL.getRoot()); 3332 CodeGenAndEmitDAG(SDAG); 3333 // Iterate over the phi nodes, if there is a phi node in a successor of this 3334 // block (for instance, the default block), then add a pair of operands to 3335 // the phi node for this block, as if we were coming from the original 3336 // BB before switch expansion. 3337 for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) { 3338 MachineInstr *PHI = PHINodesToUpdate[pi].first; 3339 MachineBasicBlock *PHIBB = PHI->getParent(); 3340 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 3341 "This is not a machine PHI node that we are updating!"); 3342 if (PHIBB == SwitchCases[i].LHSBB || PHIBB == SwitchCases[i].RHSBB) { 3343 PHI->addRegOperand(PHINodesToUpdate[pi].second); 3344 PHI->addMachineBasicBlockOperand(BB); 3345 } 3346 } 3347 } 3348} 3349 3350//===----------------------------------------------------------------------===// 3351/// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each 3352/// target node in the graph. 3353void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) { 3354 if (ViewSchedDAGs) DAG.viewGraph(); 3355 ScheduleDAG *SL = NULL; 3356 3357 switch (ISHeuristic) { 3358 default: assert(0 && "Unrecognized scheduling heuristic"); 3359 case defaultScheduling: 3360 if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency) 3361 SL = createTDListDAGScheduler(DAG, BB, CreateTargetHazardRecognizer()); 3362 else { 3363 assert(TLI.getSchedulingPreference() == 3364 TargetLowering::SchedulingForRegPressure && "Unknown sched type!"); 3365 SL = createBURRListDAGScheduler(DAG, BB); 3366 } 3367 break; 3368 case noScheduling: 3369 SL = createBFS_DAGScheduler(DAG, BB); 3370 break; 3371 case simpleScheduling: 3372 SL = createSimpleDAGScheduler(false, DAG, BB); 3373 break; 3374 case simpleNoItinScheduling: 3375 SL = createSimpleDAGScheduler(true, DAG, BB); 3376 break; 3377 case listSchedulingBURR: 3378 SL = createBURRListDAGScheduler(DAG, BB); 3379 break; 3380 case listSchedulingTD: 3381 SL = createTDListDAGScheduler(DAG, BB, CreateTargetHazardRecognizer()); 3382 break; 3383 } 3384 BB = SL->Run(); 3385 delete SL; 3386} 3387 3388HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() { 3389 return new HazardRecognizer(); 3390} 3391 3392/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated 3393/// by tblgen. Others should not call it. 3394void SelectionDAGISel:: 3395SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) { 3396 std::vector<SDOperand> InOps; 3397 std::swap(InOps, Ops); 3398 3399 Ops.push_back(InOps[0]); // input chain. 3400 Ops.push_back(InOps[1]); // input asm string. 3401 3402 const char *AsmStr = cast<ExternalSymbolSDNode>(InOps[1])->getSymbol(); 3403 unsigned i = 2, e = InOps.size(); 3404 if (InOps[e-1].getValueType() == MVT::Flag) 3405 --e; // Don't process a flag operand if it is here. 3406 3407 while (i != e) { 3408 unsigned Flags = cast<ConstantSDNode>(InOps[i])->getValue(); 3409 if ((Flags & 7) != 4 /*MEM*/) { 3410 // Just skip over this operand, copying the operands verbatim. 
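// The low 3 bits of each flag word encode the operand kind (4 == memory), and
// the remaining bits give the number of SDOperands belonging to that inline asm
// operand, so non-memory operands are copied through unchanged here.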
3411 Ops.insert(Ops.end(), InOps.begin()+i, InOps.begin()+i+(Flags >> 3) + 1); 3412 i += (Flags >> 3) + 1; 3413 } else { 3414 assert((Flags >> 3) == 1 && "Memory operand with multiple values?"); 3415 // Otherwise, this is a memory operand. Ask the target to select it. 3416 std::vector<SDOperand> SelOps; 3417 if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) { 3418 std::cerr << "Could not match memory address. Inline asm failure!\n"; 3419 exit(1); 3420 } 3421 3422 // Add this to the output node. 3423 Ops.push_back(DAG.getConstant(4/*MEM*/ | (SelOps.size() << 3), MVT::i32)); 3424 Ops.insert(Ops.end(), SelOps.begin(), SelOps.end()); 3425 i += 2; 3426 } 3427 } 3428 3429 // Add the flag input back if present. 3430 if (e != InOps.size()) 3431 Ops.push_back(InOps.back()); 3432} 3433
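// Example of the re-encoding performed above: if the target selects a single
// "m" operand into a two-operand address (say, a base register plus an
// immediate offset), it is emitted as the flag word (4 /*MEM*/ | (2 << 3))
// followed by those two selected operands. (Illustrative only; the operand
// count depends on what SelectInlineAsmMemoryOperand returns.)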