SelectionDAGISel.cpp (revision f8814cf8b8ba8953add60078e304fd5a4113f9cc)
1//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file was developed by the LLVM research group and is distributed under 6// the University of Illinois Open Source License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This implements the SelectionDAGISel class. 11// 12//===----------------------------------------------------------------------===// 13 14#define DEBUG_TYPE "isel" 15#include "llvm/CodeGen/SelectionDAGISel.h" 16#include "llvm/CodeGen/ScheduleDAG.h" 17#include "llvm/CallingConv.h" 18#include "llvm/Constants.h" 19#include "llvm/DerivedTypes.h" 20#include "llvm/Function.h" 21#include "llvm/GlobalVariable.h" 22#include "llvm/InlineAsm.h" 23#include "llvm/Instructions.h" 24#include "llvm/Intrinsics.h" 25#include "llvm/IntrinsicInst.h" 26#include "llvm/CodeGen/IntrinsicLowering.h" 27#include "llvm/CodeGen/MachineDebugInfo.h" 28#include "llvm/CodeGen/MachineFunction.h" 29#include "llvm/CodeGen/MachineFrameInfo.h" 30#include "llvm/CodeGen/MachineInstrBuilder.h" 31#include "llvm/CodeGen/SelectionDAG.h" 32#include "llvm/CodeGen/SSARegMap.h" 33#include "llvm/Target/MRegisterInfo.h" 34#include "llvm/Target/TargetData.h" 35#include "llvm/Target/TargetFrameInfo.h" 36#include "llvm/Target/TargetInstrInfo.h" 37#include "llvm/Target/TargetLowering.h" 38#include "llvm/Target/TargetMachine.h" 39#include "llvm/Transforms/Utils/BasicBlockUtils.h" 40#include "llvm/Support/CommandLine.h" 41#include "llvm/Support/MathExtras.h" 42#include "llvm/Support/Debug.h" 43#include <map> 44#include <set> 45#include <iostream> 46#include <algorithm> 47using namespace llvm; 48 49#ifndef NDEBUG 50static cl::opt<bool> 51ViewISelDAGs("view-isel-dags", cl::Hidden, 52 cl::desc("Pop up a window to show isel dags as they are selected")); 53static cl::opt<bool> 54ViewSchedDAGs("view-sched-dags", cl::Hidden, 55 cl::desc("Pop up a window to show sched dags as they are processed")); 56#else 57static const bool ViewISelDAGs = 0; 58static const bool ViewSchedDAGs = 0; 59#endif 60 61// Scheduling heuristics 62enum SchedHeuristics { 63 defaultScheduling, // Let the target specify its preference. 64 noScheduling, // No scheduling, emit breadth first sequence. 65 simpleScheduling, // Two pass, min. critical path, max. utilization. 66 simpleNoItinScheduling, // Same as above exact using generic latency. 67 listSchedulingBURR, // Bottom up reg reduction list scheduling. 68 listSchedulingTD // Top-down list scheduler. 
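// Note: these enumerators are exposed on the command line through the "sched"
// option defined in the anonymous namespace just below (e.g. -sched=none,
// -sched=simple, -sched=list-burr, -sched=list-td); the default value,
// defaultScheduling, defers to whatever scheduling style the target prefers.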
69}; 70 71namespace { 72 cl::opt<SchedHeuristics> 73 ISHeuristic( 74 "sched", 75 cl::desc("Choose scheduling style"), 76 cl::init(defaultScheduling), 77 cl::values( 78 clEnumValN(defaultScheduling, "default", 79 "Target preferred scheduling style"), 80 clEnumValN(noScheduling, "none", 81 "No scheduling: breadth first sequencing"), 82 clEnumValN(simpleScheduling, "simple", 83 "Simple two pass scheduling: minimize critical path " 84 "and maximize processor utilization"), 85 clEnumValN(simpleNoItinScheduling, "simple-noitin", 86 "Simple two pass scheduling: Same as simple " 87 "except using generic latency"), 88 clEnumValN(listSchedulingBURR, "list-burr", 89 "Bottom up register reduction list scheduling"), 90 clEnumValN(listSchedulingTD, "list-td", 91 "Top-down list scheduler"), 92 clEnumValEnd)); 93} // namespace 94 95namespace { 96 /// RegsForValue - This struct represents the physical registers that a 97 /// particular value is assigned and the type information about the value. 98 /// This is needed because values can be promoted into larger registers and 99 /// expanded into multiple smaller registers than the value. 100 struct RegsForValue { 101 /// Regs - This list hold the register (for legal and promoted values) 102 /// or register set (for expanded values) that the value should be assigned 103 /// to. 104 std::vector<unsigned> Regs; 105 106 /// RegVT - The value type of each register. 107 /// 108 MVT::ValueType RegVT; 109 110 /// ValueVT - The value type of the LLVM value, which may be promoted from 111 /// RegVT or made from merging the two expanded parts. 112 MVT::ValueType ValueVT; 113 114 RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {} 115 116 RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt) 117 : RegVT(regvt), ValueVT(valuevt) { 118 Regs.push_back(Reg); 119 } 120 RegsForValue(const std::vector<unsigned> ®s, 121 MVT::ValueType regvt, MVT::ValueType valuevt) 122 : Regs(regs), RegVT(regvt), ValueVT(valuevt) { 123 } 124 125 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from 126 /// this value and returns the result as a ValueVT value. This uses 127 /// Chain/Flag as the input and updates them for the output Chain/Flag. 128 SDOperand getCopyFromRegs(SelectionDAG &DAG, 129 SDOperand &Chain, SDOperand &Flag) const; 130 131 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the 132 /// specified value into the registers specified by this object. This uses 133 /// Chain/Flag as the input and updates them for the output Chain/Flag. 134 void getCopyToRegs(SDOperand Val, SelectionDAG &DAG, 135 SDOperand &Chain, SDOperand &Flag) const; 136 137 /// AddInlineAsmOperands - Add this value to the specified inlineasm node 138 /// operand list. This adds the code marker and includes the number of 139 /// values added into it. 140 void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG, 141 std::vector<SDOperand> &Ops) const; 142 }; 143} 144 145namespace llvm { 146 //===--------------------------------------------------------------------===// 147 /// FunctionLoweringInfo - This contains information that is global to a 148 /// function that is used when lowering a region of the function. 149 class FunctionLoweringInfo { 150 public: 151 TargetLowering &TLI; 152 Function &Fn; 153 MachineFunction &MF; 154 SSARegMap *RegMap; 155 156 FunctionLoweringInfo(TargetLowering &TLI, Function &Fn,MachineFunction &MF); 157 158 /// MBBMap - A mapping from LLVM basic blocks to their machine code entry. 
159 std::map<const BasicBlock*, MachineBasicBlock *> MBBMap; 160 161 /// ValueMap - Since we emit code for the function a basic block at a time, 162 /// we must remember which virtual registers hold the values for 163 /// cross-basic-block values. 164 std::map<const Value*, unsigned> ValueMap; 165 166 /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in 167 /// the entry block. This allows the allocas to be efficiently referenced 168 /// anywhere in the function. 169 std::map<const AllocaInst*, int> StaticAllocaMap; 170 171 unsigned MakeReg(MVT::ValueType VT) { 172 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT)); 173 } 174 175 unsigned CreateRegForValue(const Value *V); 176 177 unsigned InitializeRegForValue(const Value *V) { 178 unsigned &R = ValueMap[V]; 179 assert(R == 0 && "Already initialized this value register!"); 180 return R = CreateRegForValue(V); 181 } 182 }; 183} 184 185/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by 186/// PHI nodes or outside of the basic block that defines it, or used by a 187/// switch instruction, which may expand to multiple basic blocks. 188static bool isUsedOutsideOfDefiningBlock(Instruction *I) { 189 if (isa<PHINode>(I)) return true; 190 BasicBlock *BB = I->getParent(); 191 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI) 192 if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) || 193 isa<SwitchInst>(*UI)) 194 return true; 195 return false; 196} 197 198/// isOnlyUsedInEntryBlock - If the specified argument is only used in the 199/// entry block, return true. This includes arguments used by switches, since 200/// the switch may expand into multiple basic blocks. 201static bool isOnlyUsedInEntryBlock(Argument *A) { 202 BasicBlock *Entry = A->getParent()->begin(); 203 for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI) 204 if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI)) 205 return false; // Use not in entry block. 206 return true; 207} 208 209FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli, 210 Function &fn, MachineFunction &mf) 211 : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) { 212 213 // Create a vreg for each argument register that is not dead and is used 214 // outside of the entry block for the function. 215 for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end(); 216 AI != E; ++AI) 217 if (!isOnlyUsedInEntryBlock(AI)) 218 InitializeRegForValue(AI); 219 220 // Initialize the mapping of values to registers. This is only set up for 221 // instruction values that are used outside of the block that defines 222 // them. 223 Function::iterator BB = Fn.begin(), EB = Fn.end(); 224 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) 225 if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) 226 if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) { 227 const Type *Ty = AI->getAllocatedType(); 228 uint64_t TySize = TLI.getTargetData().getTypeSize(Ty); 229 unsigned Align = 230 std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty), 231 AI->getAlignment()); 232 233 // If the alignment of the value is smaller than the size of the value, 234 // and if the size of the value is particularly small (<= 8 bytes), 235 // round up to the size of the value for potentially better performance. 236 // 237 // FIXME: This could be made better with a preferred alignment hook in 238 // TargetData. It serves primarily to 8-byte align doubles for X86. 
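// For example, on X86 a "double" alloca has TySize == 8 but TargetData reports
// only 4-byte alignment, so the test below bumps Align up to 8; allocas larger
// than 8 bytes keep whatever alignment TargetData and the alloca itself declare:
//   %X = alloca double    ; becomes an 8-byte frame object with 8-byte alignment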
239 if (Align < TySize && TySize <= 8) Align = TySize; 240 TySize *= CUI->getValue(); // Get total allocated size. 241 if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects. 242 StaticAllocaMap[AI] = 243 MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align); 244 } 245 246 for (; BB != EB; ++BB) 247 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) 248 if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I)) 249 if (!isa<AllocaInst>(I) || 250 !StaticAllocaMap.count(cast<AllocaInst>(I))) 251 InitializeRegForValue(I); 252 253 // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This 254 // also creates the initial PHI MachineInstrs, though none of the input 255 // operands are populated. 256 for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) { 257 MachineBasicBlock *MBB = new MachineBasicBlock(BB); 258 MBBMap[BB] = MBB; 259 MF.getBasicBlockList().push_back(MBB); 260 261 // Create Machine PHI nodes for LLVM PHI nodes, lowering them as 262 // appropriate. 263 PHINode *PN; 264 for (BasicBlock::iterator I = BB->begin(); 265 (PN = dyn_cast<PHINode>(I)); ++I) 266 if (!PN->use_empty()) { 267 MVT::ValueType VT = TLI.getValueType(PN->getType()); 268 unsigned NumElements; 269 if (VT != MVT::Vector) 270 NumElements = TLI.getNumElements(VT); 271 else { 272 MVT::ValueType VT1,VT2; 273 NumElements = 274 TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()), 275 VT1, VT2); 276 } 277 unsigned PHIReg = ValueMap[PN]; 278 assert(PHIReg &&"PHI node does not have an assigned virtual register!"); 279 for (unsigned i = 0; i != NumElements; ++i) 280 BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i); 281 } 282 } 283} 284 285/// CreateRegForValue - Allocate the appropriate number of virtual registers of 286/// the correctly promoted or expanded types. Assign these registers 287/// consecutive vreg numbers and return the first assigned number. 288unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) { 289 MVT::ValueType VT = TLI.getValueType(V->getType()); 290 291 // The number of multiples of registers that we need, to, e.g., split up 292 // a <2 x int64> -> 4 x i32 registers. 293 unsigned NumVectorRegs = 1; 294 295 // If this is a packed type, figure out what type it will decompose into 296 // and how many of the elements it will use. 297 if (VT == MVT::Vector) { 298 const PackedType *PTy = cast<PackedType>(V->getType()); 299 unsigned NumElts = PTy->getNumElements(); 300 MVT::ValueType EltTy = TLI.getValueType(PTy->getElementType()); 301 302 // Divide the input until we get to a supported size. This will always 303 // end with a scalar if the target doesn't support vectors. 304 while (NumElts > 1 && !TLI.isTypeLegal(getVectorType(EltTy, NumElts))) { 305 NumElts >>= 1; 306 NumVectorRegs <<= 1; 307 } 308 if (NumElts == 1) 309 VT = EltTy; 310 else 311 VT = getVectorType(EltTy, NumElts); 312 } 313 314 // The common case is that we will only create one register for this 315 // value. If we have that case, create and return the virtual register. 316 unsigned NV = TLI.getNumElements(VT); 317 if (NV == 1) { 318 // If we are promoting this value, pick the next largest supported type. 319 MVT::ValueType PromotedType = TLI.getTypeToTransformTo(VT); 320 unsigned Reg = MakeReg(PromotedType); 321 // If this is a vector of supported or promoted types (e.g. 4 x i16), 322 // create all of the registers. 
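// For example, a <4 x i16> value on a target with no legal vector types reaches
// this point with NumVectorRegs == 4 and VT == i16; if i16 is promoted (say to
// i32), the first register was created just above and the loop below creates the
// remaining three, so the value lives in four consecutive i32 virtual registers
// and the number of the first one is returned.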
323 for (unsigned i = 1; i != NumVectorRegs; ++i) 324 MakeReg(PromotedType); 325 return Reg; 326 } 327 328 // If this value is represented with multiple target registers, make sure 329 // to create enough consecutive registers of the right (smaller) type. 330 unsigned NT = VT-1; // Find the type to use. 331 while (TLI.getNumElements((MVT::ValueType)NT) != 1) 332 --NT; 333 334 unsigned R = MakeReg((MVT::ValueType)NT); 335 for (unsigned i = 1; i != NV*NumVectorRegs; ++i) 336 MakeReg((MVT::ValueType)NT); 337 return R; 338} 339 340//===----------------------------------------------------------------------===// 341/// SelectionDAGLowering - This is the common target-independent lowering 342/// implementation that is parameterized by a TargetLowering object. 343/// Also, targets can overload any lowering method. 344/// 345namespace llvm { 346class SelectionDAGLowering { 347 MachineBasicBlock *CurMBB; 348 349 std::map<const Value*, SDOperand> NodeMap; 350 351 /// PendingLoads - Loads are not emitted to the program immediately. We bunch 352 /// them up and then emit token factor nodes when possible. This allows us to 353 /// get simple disambiguation between loads without worrying about alias 354 /// analysis. 355 std::vector<SDOperand> PendingLoads; 356 357 /// Case - A pair of values to record the Value for a switch case, and the 358 /// case's target basic block. 359 typedef std::pair<Constant*, MachineBasicBlock*> Case; 360 typedef std::vector<Case>::iterator CaseItr; 361 typedef std::pair<CaseItr, CaseItr> CaseRange; 362 363 /// CaseRec - A struct with ctor used in lowering switches to a binary tree 364 /// of conditional branches. 365 struct CaseRec { 366 CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) : 367 CaseBB(bb), LT(lt), GE(ge), Range(r) {} 368 369 /// CaseBB - The MBB in which to emit the compare and branch 370 MachineBasicBlock *CaseBB; 371 /// LT, GE - If nonzero, we know the current case value must be less-than or 372 /// greater-than-or-equal-to these Constants. 373 Constant *LT; 374 Constant *GE; 375 /// Range - A pair of iterators representing the range of case values to be 376 /// processed at this point in the binary search tree. 377 CaseRange Range; 378 }; 379 380 /// The comparison function for sorting Case values. 381 struct CaseCmp { 382 bool operator () (const Case& C1, const Case& C2) { 383 if (const ConstantUInt* U1 = dyn_cast<const ConstantUInt>(C1.first)) 384 return U1->getValue() < cast<const ConstantUInt>(C2.first)->getValue(); 385 386 const ConstantSInt* S1 = dyn_cast<const ConstantSInt>(C1.first); 387 return S1->getValue() < cast<const ConstantSInt>(C2.first)->getValue(); 388 } 389 }; 390 391public: 392 // TLI - This is information that describes the available target features we 393 // need for lowering. This indicates when operations are unavailable, 394 // implemented with a libcall, etc. 395 TargetLowering &TLI; 396 SelectionDAG &DAG; 397 const TargetData &TD; 398 399 /// SwitchCases - Vector of CaseBlock structures used to communicate 400 /// SwitchInst code generation information. 401 std::vector<SelectionDAGISel::CaseBlock> SwitchCases; 402 403 /// FuncInfo - Information about the function as a whole. 404 /// 405 FunctionLoweringInfo &FuncInfo; 406 407 SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli, 408 FunctionLoweringInfo &funcinfo) 409 : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()), 410 FuncInfo(funcinfo) { 411 } 412 413 /// getRoot - Return the current virtual root of the Selection DAG. 
414 /// 415 SDOperand getRoot() { 416 if (PendingLoads.empty()) 417 return DAG.getRoot(); 418 419 if (PendingLoads.size() == 1) { 420 SDOperand Root = PendingLoads[0]; 421 DAG.setRoot(Root); 422 PendingLoads.clear(); 423 return Root; 424 } 425 426 // Otherwise, we have to make a token factor node. 427 SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other, PendingLoads); 428 PendingLoads.clear(); 429 DAG.setRoot(Root); 430 return Root; 431 } 432 433 void visit(Instruction &I) { visit(I.getOpcode(), I); } 434 435 void visit(unsigned Opcode, User &I) { 436 switch (Opcode) { 437 default: assert(0 && "Unknown instruction type encountered!"); 438 abort(); 439 // Build the switch statement using the Instruction.def file. 440#define HANDLE_INST(NUM, OPCODE, CLASS) \ 441 case Instruction::OPCODE:return visit##OPCODE((CLASS&)I); 442#include "llvm/Instruction.def" 443 } 444 } 445 446 void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; } 447 448 SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr, 449 SDOperand SrcValue, SDOperand Root, 450 bool isVolatile); 451 452 SDOperand getIntPtrConstant(uint64_t Val) { 453 return DAG.getConstant(Val, TLI.getPointerTy()); 454 } 455 456 SDOperand getValue(const Value *V); 457 458 const SDOperand &setValue(const Value *V, SDOperand NewN) { 459 SDOperand &N = NodeMap[V]; 460 assert(N.Val == 0 && "Already set a value for this node!"); 461 return N = NewN; 462 } 463 464 RegsForValue GetRegistersForValue(const std::string &ConstrCode, 465 MVT::ValueType VT, 466 bool OutReg, bool InReg, 467 std::set<unsigned> &OutputRegs, 468 std::set<unsigned> &InputRegs); 469 470 // Terminator instructions. 471 void visitRet(ReturnInst &I); 472 void visitBr(BranchInst &I); 473 void visitSwitch(SwitchInst &I); 474 void visitUnreachable(UnreachableInst &I) { /* noop */ } 475 476 // Helper for visitSwitch 477 void visitSwitchCase(SelectionDAGISel::CaseBlock &CB); 478 479 // These all get lowered before this pass. 480 void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); } 481 void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); } 482 483 void visitBinary(User &I, unsigned IntOp, unsigned FPOp, unsigned VecOp); 484 void visitShift(User &I, unsigned Opcode); 485 void visitAdd(User &I) { 486 visitBinary(I, ISD::ADD, ISD::FADD, ISD::VADD); 487 } 488 void visitSub(User &I); 489 void visitMul(User &I) { 490 visitBinary(I, ISD::MUL, ISD::FMUL, ISD::VMUL); 491 } 492 void visitDiv(User &I) { 493 const Type *Ty = I.getType(); 494 visitBinary(I, 495 Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV, 496 Ty->isSigned() ? ISD::VSDIV : ISD::VUDIV); 497 } 498 void visitRem(User &I) { 499 const Type *Ty = I.getType(); 500 visitBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, ISD::FREM, 0); 501 } 502 void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, ISD::VAND); } 503 void visitOr (User &I) { visitBinary(I, ISD::OR, 0, ISD::VOR); } 504 void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, ISD::VXOR); } 505 void visitShl(User &I) { visitShift(I, ISD::SHL); } 506 void visitShr(User &I) { 507 visitShift(I, I.getType()->isUnsigned() ? 
ISD::SRL : ISD::SRA); 508 } 509 510 void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc); 511 void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ); } 512 void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE); } 513 void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE); } 514 void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE); } 515 void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT); } 516 void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT); } 517 518 void visitExtractElement(User &I); 519 void visitInsertElement(User &I); 520 521 void visitGetElementPtr(User &I); 522 void visitCast(User &I); 523 void visitSelect(User &I); 524 525 void visitMalloc(MallocInst &I); 526 void visitFree(FreeInst &I); 527 void visitAlloca(AllocaInst &I); 528 void visitLoad(LoadInst &I); 529 void visitStore(StoreInst &I); 530 void visitPHI(PHINode &I) { } // PHI nodes are handled specially. 531 void visitCall(CallInst &I); 532 void visitInlineAsm(CallInst &I); 533 const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic); 534 void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic); 535 536 void visitVAStart(CallInst &I); 537 void visitVAArg(VAArgInst &I); 538 void visitVAEnd(CallInst &I); 539 void visitVACopy(CallInst &I); 540 void visitFrameReturnAddress(CallInst &I, bool isFrameAddress); 541 542 void visitMemIntrinsic(CallInst &I, unsigned Op); 543 544 void visitUserOp1(Instruction &I) { 545 assert(0 && "UserOp1 should not exist at instruction selection time!"); 546 abort(); 547 } 548 void visitUserOp2(Instruction &I) { 549 assert(0 && "UserOp2 should not exist at instruction selection time!"); 550 abort(); 551 } 552}; 553} // end namespace llvm 554 555SDOperand SelectionDAGLowering::getValue(const Value *V) { 556 SDOperand &N = NodeMap[V]; 557 if (N.Val) return N; 558 559 const Type *VTy = V->getType(); 560 MVT::ValueType VT = TLI.getValueType(VTy); 561 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) { 562 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { 563 visit(CE->getOpcode(), *CE); 564 assert(N.Val && "visit didn't populate the ValueMap!"); 565 return N; 566 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) { 567 return N = DAG.getGlobalAddress(GV, VT); 568 } else if (isa<ConstantPointerNull>(C)) { 569 return N = DAG.getConstant(0, TLI.getPointerTy()); 570 } else if (isa<UndefValue>(C)) { 571 if (!isa<PackedType>(VTy)) 572 return N = DAG.getNode(ISD::UNDEF, VT); 573 574 // Create a VBUILD_VECTOR of undef nodes. 575 const PackedType *PTy = cast<PackedType>(VTy); 576 unsigned NumElements = PTy->getNumElements(); 577 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType()); 578 579 std::vector<SDOperand> Ops; 580 Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT)); 581 582 // Create a VConstant node with generic Vector type. 583 Ops.push_back(DAG.getConstant(NumElements, MVT::i32)); 584 Ops.push_back(DAG.getValueType(PVT)); 585 return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops); 586 } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) { 587 return N = DAG.getConstantFP(CFP->getValue(), VT); 588 } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) { 589 unsigned NumElements = PTy->getNumElements(); 590 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType()); 591 592 // Now that we know the number and type of the elements, push a 593 // Constant or ConstantFP node onto the ops list for each element of 594 // the packed constant. 
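// A VBUILD_VECTOR node built here carries the element values followed by two
// trailing operands: the element count as an i32 constant and the element value
// type.  For example, a zero-initialized <4 x int> constant becomes roughly
//   VBUILD_VECTOR(0, 0, 0, 0, 4, i32)
// where each leading operand is an integer constant zero of the element type.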
595 std::vector<SDOperand> Ops; 596 if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) { 597 for (unsigned i = 0; i != NumElements; ++i) 598 Ops.push_back(getValue(CP->getOperand(i))); 599 } else { 600 assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!"); 601 SDOperand Op; 602 if (MVT::isFloatingPoint(PVT)) 603 Op = DAG.getConstantFP(0, PVT); 604 else 605 Op = DAG.getConstant(0, PVT); 606 Ops.assign(NumElements, Op); 607 } 608 609 // Create a VBUILD_VECTOR node with generic Vector type. 610 Ops.push_back(DAG.getConstant(NumElements, MVT::i32)); 611 Ops.push_back(DAG.getValueType(PVT)); 612 return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops); 613 } else { 614 // Canonicalize all constant ints to be unsigned. 615 return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT); 616 } 617 } 618 619 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) { 620 std::map<const AllocaInst*, int>::iterator SI = 621 FuncInfo.StaticAllocaMap.find(AI); 622 if (SI != FuncInfo.StaticAllocaMap.end()) 623 return DAG.getFrameIndex(SI->second, TLI.getPointerTy()); 624 } 625 626 std::map<const Value*, unsigned>::const_iterator VMI = 627 FuncInfo.ValueMap.find(V); 628 assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!"); 629 630 unsigned InReg = VMI->second; 631 632 // If this type is not legal, make it so now. 633 if (VT != MVT::Vector) { 634 MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT); 635 636 N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT); 637 if (DestVT < VT) { 638 // Source must be expanded. This input value is actually coming from the 639 // register pair VMI->second and VMI->second+1. 640 N = DAG.getNode(ISD::BUILD_PAIR, VT, N, 641 DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT)); 642 } else if (DestVT > VT) { // Promotion case 643 if (MVT::isFloatingPoint(VT)) 644 N = DAG.getNode(ISD::FP_ROUND, VT, N); 645 else 646 N = DAG.getNode(ISD::TRUNCATE, VT, N); 647 } 648 } else { 649 // Otherwise, if this is a vector, make it available as a generic vector 650 // here. 651 MVT::ValueType PTyElementVT, PTyLegalElementVT; 652 unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(VTy),PTyElementVT, 653 PTyLegalElementVT); 654 655 // Build a VBUILD_VECTOR with the input registers. 656 std::vector<SDOperand> Ops; 657 if (PTyElementVT == PTyLegalElementVT) { 658 // If the value types are legal, just VBUILD the CopyFromReg nodes. 659 for (unsigned i = 0; i != NE; ++i) 660 Ops.push_back(DAG.getCopyFromReg(DAG.getEntryNode(), InReg++, 661 PTyElementVT)); 662 } else if (PTyElementVT < PTyLegalElementVT) { 663 // If the register was promoted, use TRUNCATE of FP_ROUND as appropriate. 664 for (unsigned i = 0; i != NE; ++i) { 665 SDOperand Op = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++, 666 PTyElementVT); 667 if (MVT::isFloatingPoint(PTyElementVT)) 668 Op = DAG.getNode(ISD::FP_ROUND, PTyElementVT, Op); 669 else 670 Op = DAG.getNode(ISD::TRUNCATE, PTyElementVT, Op); 671 Ops.push_back(Op); 672 } 673 } else { 674 // If the register was expanded, use BUILD_PAIR. 
675 assert((NE & 1) == 0 && "Must expand into a multiple of 2 elements!"); 676 for (unsigned i = 0; i != NE/2; ++i) { 677 SDOperand Op0 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++, 678 PTyElementVT); 679 SDOperand Op1 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++, 680 PTyElementVT); 681 Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Op0, Op1)); 682 } 683 } 684 685 Ops.push_back(DAG.getConstant(NE, MVT::i32)); 686 Ops.push_back(DAG.getValueType(PTyLegalElementVT)); 687 N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops); 688 } 689 690 return N; 691} 692 693 694void SelectionDAGLowering::visitRet(ReturnInst &I) { 695 if (I.getNumOperands() == 0) { 696 DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot())); 697 return; 698 } 699 std::vector<SDOperand> NewValues; 700 NewValues.push_back(getRoot()); 701 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) { 702 SDOperand RetOp = getValue(I.getOperand(i)); 703 704 // If this is an integer return value, we need to promote it ourselves to 705 // the full width of a register, since LegalizeOp will use ANY_EXTEND rather 706 // than sign/zero. 707 if (MVT::isInteger(RetOp.getValueType()) && 708 RetOp.getValueType() < MVT::i64) { 709 MVT::ValueType TmpVT; 710 if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote) 711 TmpVT = TLI.getTypeToTransformTo(MVT::i32); 712 else 713 TmpVT = MVT::i32; 714 715 if (I.getOperand(i)->getType()->isSigned()) 716 RetOp = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, RetOp); 717 else 718 RetOp = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, RetOp); 719 } 720 NewValues.push_back(RetOp); 721 } 722 DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, NewValues)); 723} 724 725void SelectionDAGLowering::visitBr(BranchInst &I) { 726 // Update machine-CFG edges. 727 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)]; 728 CurMBB->addSuccessor(Succ0MBB); 729 730 // Figure out which block is immediately after the current one. 731 MachineBasicBlock *NextBlock = 0; 732 MachineFunction::iterator BBI = CurMBB; 733 if (++BBI != CurMBB->getParent()->end()) 734 NextBlock = BBI; 735 736 if (I.isUnconditional()) { 737 // If this is not a fall-through branch, emit the branch. 738 if (Succ0MBB != NextBlock) 739 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(), 740 DAG.getBasicBlock(Succ0MBB))); 741 } else { 742 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)]; 743 CurMBB->addSuccessor(Succ1MBB); 744 745 SDOperand Cond = getValue(I.getCondition()); 746 if (Succ1MBB == NextBlock) { 747 // If the condition is false, fall through. This means we should branch 748 // if the condition is true to Succ #0. 749 DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), 750 Cond, DAG.getBasicBlock(Succ0MBB))); 751 } else if (Succ0MBB == NextBlock) { 752 // If the condition is true, fall through. This means we should branch if 753 // the condition is false to Succ #1. Invert the condition first. 754 SDOperand True = DAG.getConstant(1, Cond.getValueType()); 755 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True); 756 DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), 757 Cond, DAG.getBasicBlock(Succ1MBB))); 758 } else { 759 std::vector<SDOperand> Ops; 760 Ops.push_back(getRoot()); 761 // If the false case is the current basic block, then this is a self 762 // loop. We do not want to emit "Loop: ... brcond Out; br Loop", as it 763 // adds an extra instruction in the loop. Instead, invert the 764 // condition and emit "Loop: ... br!cond Loop; br Out. 
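// Note that the condition is inverted by XOR'ing the i1 value with the constant
// 1 rather than by re-emitting the comparison; visitSwitchCase below uses the
// same trick when it swaps its two successors to enable a fall-through.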
765 if (CurMBB == Succ1MBB) { 766 std::swap(Succ0MBB, Succ1MBB); 767 SDOperand True = DAG.getConstant(1, Cond.getValueType()); 768 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True); 769 } 770 SDOperand True = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond, 771 DAG.getBasicBlock(Succ0MBB)); 772 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, True, 773 DAG.getBasicBlock(Succ1MBB))); 774 } 775 } 776} 777 778/// visitSwitchCase - Emits the necessary code to represent a single node in 779/// the binary search tree resulting from lowering a switch instruction. 780void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) { 781 SDOperand SwitchOp = getValue(CB.SwitchV); 782 SDOperand CaseOp = getValue(CB.CaseC); 783 SDOperand Cond = DAG.getSetCC(MVT::i1, SwitchOp, CaseOp, CB.CC); 784 785 // Set NextBlock to be the MBB immediately after the current one, if any. 786 // This is used to avoid emitting unnecessary branches to the next block. 787 MachineBasicBlock *NextBlock = 0; 788 MachineFunction::iterator BBI = CurMBB; 789 if (++BBI != CurMBB->getParent()->end()) 790 NextBlock = BBI; 791 792 // If the lhs block is the next block, invert the condition so that we can 793 // fall through to the lhs instead of the rhs block. 794 if (CB.LHSBB == NextBlock) { 795 std::swap(CB.LHSBB, CB.RHSBB); 796 SDOperand True = DAG.getConstant(1, Cond.getValueType()); 797 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True); 798 } 799 SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond, 800 DAG.getBasicBlock(CB.LHSBB)); 801 if (CB.RHSBB == NextBlock) 802 DAG.setRoot(BrCond); 803 else 804 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond, 805 DAG.getBasicBlock(CB.RHSBB))); 806 // Update successor info 807 CurMBB->addSuccessor(CB.LHSBB); 808 CurMBB->addSuccessor(CB.RHSBB); 809} 810 811void SelectionDAGLowering::visitSwitch(SwitchInst &I) { 812 // Figure out which block is immediately after the current one. 813 MachineBasicBlock *NextBlock = 0; 814 MachineFunction::iterator BBI = CurMBB; 815 if (++BBI != CurMBB->getParent()->end()) 816 NextBlock = BBI; 817 818 // If there is only the default destination, branch to it if it is not the 819 // next basic block. Otherwise, just fall through. 820 if (I.getNumOperands() == 2) { 821 // Update machine-CFG edges. 822 MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[I.getDefaultDest()]; 823 // If this is not a fall-through branch, emit the branch. 824 if (DefaultMBB != NextBlock) 825 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(), 826 DAG.getBasicBlock(DefaultMBB))); 827 return; 828 } 829 830 // If there are any non-default case statements, create a vector of Cases 831 // representing each one, and sort the vector so that we can efficiently 832 // create a binary search tree from them. 833 std::vector<Case> Cases; 834 for (unsigned i = 1; i < I.getNumSuccessors(); ++i) { 835 MachineBasicBlock *SMBB = FuncInfo.MBBMap[I.getSuccessor(i)]; 836 Cases.push_back(Case(I.getSuccessorValue(i), SMBB)); 837 } 838 std::sort(Cases.begin(), Cases.end(), CaseCmp()); 839 840 // Get the Value to be switched on and default basic blocks, which will be 841 // inserted into CaseBlock records, representing basic blocks in the binary 842 // search tree. 843 Value *SV = I.getOperand(0); 844 MachineBasicBlock *Default = FuncInfo.MBBMap[I.getDefaultDest()]; 845 846 // Get the current MachineFunction and LLVM basic block, for use in creating 847 // and inserting new MBBs during the creation of the binary search tree. 
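// For illustration, a switch with cases 1, 2, 3, 4 plus a default is handled as
// follows: the sorted cases are wrapped in one initial CaseRec; each iteration
// below pops a record, and a range of size one becomes a SETEQ CaseBlock
// comparing SV against that case value, while a larger range picks its middle
// case as the pivot, emits a SETLT/SETULT CaseBlock against the pivot, and
// pushes the two halves back onto the worklist as new CaseRecs.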
848 MachineFunction *CurMF = CurMBB->getParent(); 849 const BasicBlock *LLVMBB = CurMBB->getBasicBlock(); 850 851 // Push the initial CaseRec onto the worklist 852 std::vector<CaseRec> CaseVec; 853 CaseVec.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end()))); 854 855 while (!CaseVec.empty()) { 856 // Grab a record representing a case range to process off the worklist 857 CaseRec CR = CaseVec.back(); 858 CaseVec.pop_back(); 859 860 // Size is the number of Cases represented by this range. If Size is 1, 861 // then we are processing a leaf of the binary search tree. Otherwise, 862 // we need to pick a pivot, and push left and right ranges onto the 863 // worklist. 864 unsigned Size = CR.Range.second - CR.Range.first; 865 866 if (Size == 1) { 867 // Create a CaseBlock record representing a conditional branch to 868 // the Case's target mbb if the value being switched on SV is equal 869 // to C. Otherwise, branch to default. 870 Constant *C = CR.Range.first->first; 871 MachineBasicBlock *Target = CR.Range.first->second; 872 SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, C, Target, Default, 873 CR.CaseBB); 874 // If the MBB representing the leaf node is the current MBB, then just 875 // call visitSwitchCase to emit the code into the current block. 876 // Otherwise, push the CaseBlock onto the vector to be later processed 877 // by SDISel, and insert the node's MBB before the next MBB. 878 if (CR.CaseBB == CurMBB) 879 visitSwitchCase(CB); 880 else { 881 SwitchCases.push_back(CB); 882 CurMF->getBasicBlockList().insert(BBI, CR.CaseBB); 883 } 884 } else { 885 // split case range at pivot 886 CaseItr Pivot = CR.Range.first + (Size / 2); 887 CaseRange LHSR(CR.Range.first, Pivot); 888 CaseRange RHSR(Pivot, CR.Range.second); 889 Constant *C = Pivot->first; 890 MachineBasicBlock *RHSBB = 0, *LHSBB = 0; 891 // We know that we branch to the LHS if the Value being switched on is 892 // less than the Pivot value, C. We use this to optimize our binary 893 // tree a bit, by recognizing that if SV is greater than or equal to the 894 // LHS's Case Value, and that Case Value is exactly one less than the 895 // Pivot's Value, then we can branch directly to the LHS's Target, 896 // rather than creating a leaf node for it. 897 if ((LHSR.second - LHSR.first) == 1 && 898 LHSR.first->first == CR.GE && 899 cast<ConstantIntegral>(C)->getRawValue() == 900 (cast<ConstantIntegral>(CR.GE)->getRawValue() + 1ULL)) { 901 LHSBB = LHSR.first->second; 902 } else { 903 LHSBB = new MachineBasicBlock(LLVMBB); 904 CaseVec.push_back(CaseRec(LHSBB,C,CR.GE,LHSR)); 905 } 906 // Similar to the optimization above, if the Value being switched on is 907 // known to be less than the Constant CR.LT, and the current Case Value 908 // is CR.LT - 1, then we can branch directly to the target block for 909 // the current Case Value, rather than emitting a RHS leaf node for it. 910 if ((RHSR.second - RHSR.first) == 1 && CR.LT && 911 cast<ConstantIntegral>(RHSR.first->first)->getRawValue() == 912 (cast<ConstantIntegral>(CR.LT)->getRawValue() - 1ULL)) { 913 RHSBB = RHSR.first->second; 914 } else { 915 RHSBB = new MachineBasicBlock(LLVMBB); 916 CaseVec.push_back(CaseRec(RHSBB,CR.LT,C,RHSR)); 917 } 918 // Create a CaseBlock record representing a conditional branch to 919 // the LHS node if the value being switched on SV is less than C. 920 // Otherwise, branch to LHS. 921 ISD::CondCode CC = C->getType()->isSigned() ? 
ISD::SETLT : ISD::SETULT; 922 SelectionDAGISel::CaseBlock CB(CC, SV, C, LHSBB, RHSBB, CR.CaseBB); 923 if (CR.CaseBB == CurMBB) 924 visitSwitchCase(CB); 925 else { 926 SwitchCases.push_back(CB); 927 CurMF->getBasicBlockList().insert(BBI, CR.CaseBB); 928 } 929 } 930 } 931} 932 933void SelectionDAGLowering::visitSub(User &I) { 934 // -0.0 - X --> fneg 935 if (I.getType()->isFloatingPoint()) { 936 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0))) 937 if (CFP->isExactlyValue(-0.0)) { 938 SDOperand Op2 = getValue(I.getOperand(1)); 939 setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2)); 940 return; 941 } 942 } 943 visitBinary(I, ISD::SUB, ISD::FSUB, ISD::VSUB); 944} 945 946void SelectionDAGLowering::visitBinary(User &I, unsigned IntOp, unsigned FPOp, 947 unsigned VecOp) { 948 const Type *Ty = I.getType(); 949 SDOperand Op1 = getValue(I.getOperand(0)); 950 SDOperand Op2 = getValue(I.getOperand(1)); 951 952 if (Ty->isIntegral()) { 953 setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2)); 954 } else if (Ty->isFloatingPoint()) { 955 setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2)); 956 } else { 957 const PackedType *PTy = cast<PackedType>(Ty); 958 SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32); 959 SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType())); 960 setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ)); 961 } 962} 963 964void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) { 965 SDOperand Op1 = getValue(I.getOperand(0)); 966 SDOperand Op2 = getValue(I.getOperand(1)); 967 968 Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2); 969 970 setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2)); 971} 972 973void SelectionDAGLowering::visitSetCC(User &I,ISD::CondCode SignedOpcode, 974 ISD::CondCode UnsignedOpcode) { 975 SDOperand Op1 = getValue(I.getOperand(0)); 976 SDOperand Op2 = getValue(I.getOperand(1)); 977 ISD::CondCode Opcode = SignedOpcode; 978 if (I.getOperand(0)->getType()->isUnsigned()) 979 Opcode = UnsignedOpcode; 980 setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode)); 981} 982 983void SelectionDAGLowering::visitSelect(User &I) { 984 SDOperand Cond = getValue(I.getOperand(0)); 985 SDOperand TrueVal = getValue(I.getOperand(1)); 986 SDOperand FalseVal = getValue(I.getOperand(2)); 987 setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond, 988 TrueVal, FalseVal)); 989} 990 991void SelectionDAGLowering::visitCast(User &I) { 992 SDOperand N = getValue(I.getOperand(0)); 993 MVT::ValueType SrcVT = N.getValueType(); 994 MVT::ValueType DestVT = TLI.getValueType(I.getType()); 995 996 if (DestVT == MVT::Vector) { 997 // This is a cast to a vector from something else. This is always a bit 998 // convert. Get information about the input vector. 999 const PackedType *DestTy = cast<PackedType>(I.getType()); 1000 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType()); 1001 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N, 1002 DAG.getConstant(DestTy->getNumElements(),MVT::i32), 1003 DAG.getValueType(EltVT))); 1004 } else if (SrcVT == DestVT) { 1005 setValue(&I, N); // noop cast. 1006 } else if (DestVT == MVT::i1) { 1007 // Cast to bool is a comparison against zero, not truncation to zero. 1008 SDOperand Zero = isInteger(SrcVT) ? 
DAG.getConstant(0, N.getValueType()) : 1009 DAG.getConstantFP(0.0, N.getValueType()); 1010 setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE)); 1011 } else if (isInteger(SrcVT)) { 1012 if (isInteger(DestVT)) { // Int -> Int cast 1013 if (DestVT < SrcVT) // Truncating cast? 1014 setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N)); 1015 else if (I.getOperand(0)->getType()->isSigned()) 1016 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N)); 1017 else 1018 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N)); 1019 } else if (isFloatingPoint(DestVT)) { // Int -> FP cast 1020 if (I.getOperand(0)->getType()->isSigned()) 1021 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N)); 1022 else 1023 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N)); 1024 } else { 1025 assert(0 && "Unknown cast!"); 1026 } 1027 } else if (isFloatingPoint(SrcVT)) { 1028 if (isFloatingPoint(DestVT)) { // FP -> FP cast 1029 if (DestVT < SrcVT) // Rounding cast? 1030 setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N)); 1031 else 1032 setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N)); 1033 } else if (isInteger(DestVT)) { // FP -> Int cast. 1034 if (I.getType()->isSigned()) 1035 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N)); 1036 else 1037 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N)); 1038 } else { 1039 assert(0 && "Unknown cast!"); 1040 } 1041 } else { 1042 assert(SrcVT == MVT::Vector && "Unknown cast!"); 1043 assert(DestVT != MVT::Vector && "Casts to vector already handled!"); 1044 // This is a cast from a vector to something else. This is always a bit 1045 // convert. Get information about the input vector. 1046 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N)); 1047 } 1048} 1049 1050void SelectionDAGLowering::visitInsertElement(User &I) { 1051 SDOperand InVec = getValue(I.getOperand(0)); 1052 SDOperand InVal = getValue(I.getOperand(1)); 1053 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), 1054 getValue(I.getOperand(2))); 1055 1056 SDOperand Num = *(InVec.Val->op_end()-2); 1057 SDOperand Typ = *(InVec.Val->op_end()-1); 1058 setValue(&I, DAG.getNode(ISD::VINSERT_VECTOR_ELT, MVT::Vector, 1059 InVec, InVal, InIdx, Num, Typ)); 1060} 1061 1062void SelectionDAGLowering::visitExtractElement(User &I) { 1063 SDOperand InVec = getValue(I.getOperand(0)); 1064 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), 1065 getValue(I.getOperand(1))); 1066 SDOperand Typ = *(InVec.Val->op_end()-1); 1067 setValue(&I, DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, 1068 TLI.getValueType(I.getType()), InVec, InIdx)); 1069} 1070 1071void SelectionDAGLowering::visitGetElementPtr(User &I) { 1072 SDOperand N = getValue(I.getOperand(0)); 1073 const Type *Ty = I.getOperand(0)->getType(); 1074 const Type *UIntPtrTy = TD.getIntPtrType(); 1075 1076 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end(); 1077 OI != E; ++OI) { 1078 Value *Idx = *OI; 1079 if (const StructType *StTy = dyn_cast<StructType>(Ty)) { 1080 unsigned Field = cast<ConstantUInt>(Idx)->getValue(); 1081 if (Field) { 1082 // N = N + Offset 1083 uint64_t Offset = TD.getStructLayout(StTy)->MemberOffsets[Field]; 1084 N = DAG.getNode(ISD::ADD, N.getValueType(), N, 1085 getIntPtrConstant(Offset)); 1086 } 1087 Ty = StTy->getElementType(Field); 1088 } else { 1089 Ty = cast<SequentialType>(Ty)->getElementType(); 1090 1091 // If this is a constant subscript, handle it quickly. 
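// For example, lowering "getelementptr int* %P, int %i" reaches the code below
// with an element size of 4: a constant index folds into a single ADD of
// index*4, while a variable index is sign- or zero-extended (or truncated) to
// the pointer width and, since 4 is a power of two, becomes an SHL by 2
// followed by an ADD.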
1092 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) { 1093 if (CI->getRawValue() == 0) continue; 1094 1095 uint64_t Offs; 1096 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI)) 1097 Offs = (int64_t)TD.getTypeSize(Ty)*CSI->getValue(); 1098 else 1099 Offs = TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue(); 1100 N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs)); 1101 continue; 1102 } 1103 1104 // N = N + Idx * ElementSize; 1105 uint64_t ElementSize = TD.getTypeSize(Ty); 1106 SDOperand IdxN = getValue(Idx); 1107 1108 // If the index is smaller or larger than intptr_t, truncate or extend 1109 // it. 1110 if (IdxN.getValueType() < N.getValueType()) { 1111 if (Idx->getType()->isSigned()) 1112 IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN); 1113 else 1114 IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN); 1115 } else if (IdxN.getValueType() > N.getValueType()) 1116 IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN); 1117 1118 // If this is a multiply by a power of two, turn it into a shl 1119 // immediately. This is a very common case. 1120 if (isPowerOf2_64(ElementSize)) { 1121 unsigned Amt = Log2_64(ElementSize); 1122 IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN, 1123 DAG.getConstant(Amt, TLI.getShiftAmountTy())); 1124 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN); 1125 continue; 1126 } 1127 1128 SDOperand Scale = getIntPtrConstant(ElementSize); 1129 IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale); 1130 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN); 1131 } 1132 } 1133 setValue(&I, N); 1134} 1135 1136void SelectionDAGLowering::visitAlloca(AllocaInst &I) { 1137 // If this is a fixed sized alloca in the entry block of the function, 1138 // allocate it statically on the stack. 1139 if (FuncInfo.StaticAllocaMap.count(&I)) 1140 return; // getValue will auto-populate this. 1141 1142 const Type *Ty = I.getAllocatedType(); 1143 uint64_t TySize = TLI.getTargetData().getTypeSize(Ty); 1144 unsigned Align = std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty), 1145 I.getAlignment()); 1146 1147 SDOperand AllocSize = getValue(I.getArraySize()); 1148 MVT::ValueType IntPtr = TLI.getPointerTy(); 1149 if (IntPtr < AllocSize.getValueType()) 1150 AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize); 1151 else if (IntPtr > AllocSize.getValueType()) 1152 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize); 1153 1154 AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize, 1155 getIntPtrConstant(TySize)); 1156 1157 // Handle alignment. If the requested alignment is less than or equal to the 1158 // stack alignment, ignore it and round the size of the allocation up to the 1159 // stack alignment size. If the size is greater than the stack alignment, we 1160 // note this in the DYNAMIC_STACKALLOC node. 1161 unsigned StackAlign = 1162 TLI.getTargetMachine().getFrameInfo()->getStackAlignment(); 1163 if (Align <= StackAlign) { 1164 Align = 0; 1165 // Add SA-1 to the size. 1166 AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize, 1167 getIntPtrConstant(StackAlign-1)); 1168 // Mask out the low bits for alignment purposes. 
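// The AND below, together with the ADD just above, implements the usual
// round-up-to-alignment formula
//   AllocSize = (AllocSize + StackAlign - 1) & ~(StackAlign - 1)
// e.g. with a 16-byte stack alignment a request for 20 bytes is rounded up to
// 32, and since Align was cleared to 0 the target sees a default-aligned
// DYNAMIC_STACKALLOC node.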
1169 AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize, 1170 getIntPtrConstant(~(uint64_t)(StackAlign-1))); 1171 } 1172 1173 std::vector<MVT::ValueType> VTs; 1174 VTs.push_back(AllocSize.getValueType()); 1175 VTs.push_back(MVT::Other); 1176 std::vector<SDOperand> Ops; 1177 Ops.push_back(getRoot()); 1178 Ops.push_back(AllocSize); 1179 Ops.push_back(getIntPtrConstant(Align)); 1180 SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, Ops); 1181 DAG.setRoot(setValue(&I, DSA).getValue(1)); 1182 1183 // Inform the Frame Information that we have just allocated a variable-sized 1184 // object. 1185 CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject(); 1186} 1187 1188void SelectionDAGLowering::visitLoad(LoadInst &I) { 1189 SDOperand Ptr = getValue(I.getOperand(0)); 1190 1191 SDOperand Root; 1192 if (I.isVolatile()) 1193 Root = getRoot(); 1194 else { 1195 // Do not serialize non-volatile loads against each other. 1196 Root = DAG.getRoot(); 1197 } 1198 1199 setValue(&I, getLoadFrom(I.getType(), Ptr, DAG.getSrcValue(I.getOperand(0)), 1200 Root, I.isVolatile())); 1201} 1202 1203SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr, 1204 SDOperand SrcValue, SDOperand Root, 1205 bool isVolatile) { 1206 SDOperand L; 1207 if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) { 1208 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType()); 1209 L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr, SrcValue); 1210 } else { 1211 L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SrcValue); 1212 } 1213 1214 if (isVolatile) 1215 DAG.setRoot(L.getValue(1)); 1216 else 1217 PendingLoads.push_back(L.getValue(1)); 1218 1219 return L; 1220} 1221 1222 1223void SelectionDAGLowering::visitStore(StoreInst &I) { 1224 Value *SrcV = I.getOperand(0); 1225 SDOperand Src = getValue(SrcV); 1226 SDOperand Ptr = getValue(I.getOperand(1)); 1227 DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr, 1228 DAG.getSrcValue(I.getOperand(1)))); 1229} 1230 1231/// IntrinsicCannotAccessMemory - Return true if the specified intrinsic cannot 1232/// access memory and has no other side effects at all. 1233static bool IntrinsicCannotAccessMemory(unsigned IntrinsicID) { 1234#define GET_NO_MEMORY_INTRINSICS 1235#include "llvm/Intrinsics.gen" 1236#undef GET_NO_MEMORY_INTRINSICS 1237 return false; 1238} 1239 1240/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC 1241/// node. 1242void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I, 1243 unsigned Intrinsic) { 1244 bool HasChain = !IntrinsicCannotAccessMemory(Intrinsic); 1245 1246 // Build the operand list. 1247 std::vector<SDOperand> Ops; 1248 if (HasChain) // If this intrinsic has side-effects, chainify it. 1249 Ops.push_back(getRoot()); 1250 1251 // Add the intrinsic ID as an integer operand. 1252 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy())); 1253 1254 // Add all operands of the call to the operand list. 1255 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) { 1256 SDOperand Op = getValue(I.getOperand(i)); 1257 1258 // If this is a vector type, force it to the right packed type. 
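// For example, a <4 x float> operand arrives here with the generic MVT::Vector
// type; the code below uses VBIT_CONVERT to give it the concrete packed type
// the target expects (v4f32 in this case, assuming that type is legal), and a
// vector result is converted back to MVT::Vector further down.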
1259 if (Op.getValueType() == MVT::Vector) { 1260 const PackedType *OpTy = cast<PackedType>(I.getOperand(i)->getType()); 1261 MVT::ValueType EltVT = TLI.getValueType(OpTy->getElementType()); 1262 1263 MVT::ValueType VVT = MVT::getVectorType(EltVT, OpTy->getNumElements()); 1264 assert(VVT != MVT::Other && "Intrinsic uses a non-legal type?"); 1265 Op = DAG.getNode(ISD::VBIT_CONVERT, VVT, Op); 1266 } 1267 1268 assert(TLI.isTypeLegal(Op.getValueType()) && 1269 "Intrinsic uses a non-legal type?"); 1270 Ops.push_back(Op); 1271 } 1272 1273 std::vector<MVT::ValueType> VTs; 1274 if (I.getType() != Type::VoidTy) { 1275 MVT::ValueType VT = TLI.getValueType(I.getType()); 1276 if (VT == MVT::Vector) { 1277 const PackedType *DestTy = cast<PackedType>(I.getType()); 1278 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType()); 1279 1280 VT = MVT::getVectorType(EltVT, DestTy->getNumElements()); 1281 assert(VT != MVT::Other && "Intrinsic uses a non-legal type?"); 1282 } 1283 1284 assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?"); 1285 VTs.push_back(VT); 1286 } 1287 if (HasChain) 1288 VTs.push_back(MVT::Other); 1289 1290 // Create the node. 1291 SDOperand Result; 1292 if (!HasChain) 1293 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTs, Ops); 1294 else if (I.getType() != Type::VoidTy) 1295 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTs, Ops); 1296 else 1297 Result = DAG.getNode(ISD::INTRINSIC_VOID, VTs, Ops); 1298 1299 if (HasChain) 1300 DAG.setRoot(Result.getValue(Result.Val->getNumValues()-1)); 1301 if (I.getType() != Type::VoidTy) { 1302 if (const PackedType *PTy = dyn_cast<PackedType>(I.getType())) { 1303 MVT::ValueType EVT = TLI.getValueType(PTy->getElementType()); 1304 Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result, 1305 DAG.getConstant(PTy->getNumElements(), MVT::i32), 1306 DAG.getValueType(EVT)); 1307 } 1308 setValue(&I, Result); 1309 } 1310} 1311 1312/// visitIntrinsicCall - Lower the call to the specified intrinsic function. If 1313/// we want to emit this as a call to a named external function, return the name 1314/// otherwise lower it and return null. 1315const char * 1316SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { 1317 switch (Intrinsic) { 1318 default: 1319 // By default, turn this into a target intrinsic node. 
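// visitTargetIntrinsic (defined above) picks INTRINSIC_WO_CHAIN,
// INTRINSIC_W_CHAIN or INTRINSIC_VOID based on whether the intrinsic may access
// memory and whether it returns a value, so any intrinsic without an explicit
// case below still gets a well-formed node rather than being lowered as a call.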
1320 visitTargetIntrinsic(I, Intrinsic); 1321 return 0; 1322 case Intrinsic::vastart: visitVAStart(I); return 0; 1323 case Intrinsic::vaend: visitVAEnd(I); return 0; 1324 case Intrinsic::vacopy: visitVACopy(I); return 0; 1325 case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0; 1326 case Intrinsic::frameaddress: visitFrameReturnAddress(I, true); return 0; 1327 case Intrinsic::setjmp: 1328 return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp(); 1329 break; 1330 case Intrinsic::longjmp: 1331 return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp(); 1332 break; 1333 case Intrinsic::memcpy_i32: 1334 case Intrinsic::memcpy_i64: 1335 visitMemIntrinsic(I, ISD::MEMCPY); 1336 return 0; 1337 case Intrinsic::memset_i32: 1338 case Intrinsic::memset_i64: 1339 visitMemIntrinsic(I, ISD::MEMSET); 1340 return 0; 1341 case Intrinsic::memmove_i32: 1342 case Intrinsic::memmove_i64: 1343 visitMemIntrinsic(I, ISD::MEMMOVE); 1344 return 0; 1345 1346 case Intrinsic::dbg_stoppoint: { 1347 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1348 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I); 1349 if (DebugInfo && SPI.getContext() && DebugInfo->Verify(SPI.getContext())) { 1350 std::vector<SDOperand> Ops; 1351 1352 Ops.push_back(getRoot()); 1353 Ops.push_back(getValue(SPI.getLineValue())); 1354 Ops.push_back(getValue(SPI.getColumnValue())); 1355 1356 DebugInfoDesc *DD = DebugInfo->getDescFor(SPI.getContext()); 1357 assert(DD && "Not a debug information descriptor"); 1358 CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD); 1359 1360 Ops.push_back(DAG.getString(CompileUnit->getFileName())); 1361 Ops.push_back(DAG.getString(CompileUnit->getDirectory())); 1362 1363 DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops)); 1364 } 1365 1366 return 0; 1367 } 1368 case Intrinsic::dbg_region_start: { 1369 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1370 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I); 1371 if (DebugInfo && RSI.getContext() && DebugInfo->Verify(RSI.getContext())) { 1372 std::vector<SDOperand> Ops; 1373 1374 unsigned LabelID = DebugInfo->RecordRegionStart(RSI.getContext()); 1375 1376 Ops.push_back(getRoot()); 1377 Ops.push_back(DAG.getConstant(LabelID, MVT::i32)); 1378 1379 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops)); 1380 } 1381 1382 return 0; 1383 } 1384 case Intrinsic::dbg_region_end: { 1385 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1386 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I); 1387 if (DebugInfo && REI.getContext() && DebugInfo->Verify(REI.getContext())) { 1388 std::vector<SDOperand> Ops; 1389 1390 unsigned LabelID = DebugInfo->RecordRegionEnd(REI.getContext()); 1391 1392 Ops.push_back(getRoot()); 1393 Ops.push_back(DAG.getConstant(LabelID, MVT::i32)); 1394 1395 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops)); 1396 } 1397 1398 return 0; 1399 } 1400 case Intrinsic::dbg_func_start: { 1401 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1402 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I); 1403 if (DebugInfo && FSI.getSubprogram() && 1404 DebugInfo->Verify(FSI.getSubprogram())) { 1405 std::vector<SDOperand> Ops; 1406 1407 unsigned LabelID = DebugInfo->RecordRegionStart(FSI.getSubprogram()); 1408 1409 Ops.push_back(getRoot()); 1410 Ops.push_back(DAG.getConstant(LabelID, MVT::i32)); 1411 1412 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops)); 1413 } 1414 1415 return 0; 1416 } 1417 case Intrinsic::dbg_declare: { 1418 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo(); 1419 
DbgDeclareInst &DI = cast<DbgDeclareInst>(I); 1420 if (DebugInfo && DI.getVariable() && DebugInfo->Verify(DI.getVariable())) { 1421 std::vector<SDOperand> Ops; 1422 1423 SDOperand AddressOp = getValue(DI.getAddress()); 1424 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp)) { 1425 DebugInfo->RecordVariable(DI.getVariable(), FI->getIndex()); 1426 } 1427 } 1428 1429 return 0; 1430 } 1431 1432 case Intrinsic::isunordered_f32: 1433 case Intrinsic::isunordered_f64: 1434 setValue(&I, DAG.getSetCC(MVT::i1,getValue(I.getOperand(1)), 1435 getValue(I.getOperand(2)), ISD::SETUO)); 1436 return 0; 1437 1438 case Intrinsic::sqrt_f32: 1439 case Intrinsic::sqrt_f64: 1440 setValue(&I, DAG.getNode(ISD::FSQRT, 1441 getValue(I.getOperand(1)).getValueType(), 1442 getValue(I.getOperand(1)))); 1443 return 0; 1444 case Intrinsic::pcmarker: { 1445 SDOperand Tmp = getValue(I.getOperand(1)); 1446 DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp)); 1447 return 0; 1448 } 1449 case Intrinsic::readcyclecounter: { 1450 std::vector<MVT::ValueType> VTs; 1451 VTs.push_back(MVT::i64); 1452 VTs.push_back(MVT::Other); 1453 std::vector<SDOperand> Ops; 1454 Ops.push_back(getRoot()); 1455 SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER, VTs, Ops); 1456 setValue(&I, Tmp); 1457 DAG.setRoot(Tmp.getValue(1)); 1458 return 0; 1459 } 1460 case Intrinsic::bswap_i16: 1461 case Intrinsic::bswap_i32: 1462 case Intrinsic::bswap_i64: 1463 setValue(&I, DAG.getNode(ISD::BSWAP, 1464 getValue(I.getOperand(1)).getValueType(), 1465 getValue(I.getOperand(1)))); 1466 return 0; 1467 case Intrinsic::cttz_i8: 1468 case Intrinsic::cttz_i16: 1469 case Intrinsic::cttz_i32: 1470 case Intrinsic::cttz_i64: 1471 setValue(&I, DAG.getNode(ISD::CTTZ, 1472 getValue(I.getOperand(1)).getValueType(), 1473 getValue(I.getOperand(1)))); 1474 return 0; 1475 case Intrinsic::ctlz_i8: 1476 case Intrinsic::ctlz_i16: 1477 case Intrinsic::ctlz_i32: 1478 case Intrinsic::ctlz_i64: 1479 setValue(&I, DAG.getNode(ISD::CTLZ, 1480 getValue(I.getOperand(1)).getValueType(), 1481 getValue(I.getOperand(1)))); 1482 return 0; 1483 case Intrinsic::ctpop_i8: 1484 case Intrinsic::ctpop_i16: 1485 case Intrinsic::ctpop_i32: 1486 case Intrinsic::ctpop_i64: 1487 setValue(&I, DAG.getNode(ISD::CTPOP, 1488 getValue(I.getOperand(1)).getValueType(), 1489 getValue(I.getOperand(1)))); 1490 return 0; 1491 case Intrinsic::stacksave: { 1492 std::vector<MVT::ValueType> VTs; 1493 VTs.push_back(TLI.getPointerTy()); 1494 VTs.push_back(MVT::Other); 1495 std::vector<SDOperand> Ops; 1496 Ops.push_back(getRoot()); 1497 SDOperand Tmp = DAG.getNode(ISD::STACKSAVE, VTs, Ops); 1498 setValue(&I, Tmp); 1499 DAG.setRoot(Tmp.getValue(1)); 1500 return 0; 1501 } 1502 case Intrinsic::stackrestore: { 1503 SDOperand Tmp = getValue(I.getOperand(1)); 1504 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp)); 1505 return 0; 1506 } 1507 case Intrinsic::prefetch: 1508 // FIXME: Currently discarding prefetches. 1509 return 0; 1510 } 1511} 1512 1513 1514void SelectionDAGLowering::visitCall(CallInst &I) { 1515 const char *RenameFn = 0; 1516 if (Function *F = I.getCalledFunction()) { 1517 if (F->isExternal()) 1518 if (unsigned IID = F->getIntrinsicID()) { 1519 RenameFn = visitIntrinsicCall(I, IID); 1520 if (!RenameFn) 1521 return; 1522 } else { // Not an LLVM intrinsic. 1523 const std::string &Name = F->getName(); 1524 if (Name[0] == 'c' && (Name == "copysign" || Name == "copysignf")) { 1525 if (I.getNumOperands() == 3 && // Basic sanity checks. 
1526           I.getOperand(1)->getType()->isFloatingPoint() &&
1527           I.getType() == I.getOperand(1)->getType() &&
1528           I.getType() == I.getOperand(2)->getType()) {
1529         SDOperand LHS = getValue(I.getOperand(1));
1530         SDOperand RHS = getValue(I.getOperand(2));
1531         setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(),
1532                                  LHS, RHS));
1533         return;
1534       }
1535     } else if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
1536       if (I.getNumOperands() == 2 &&   // Basic sanity checks.
1537           I.getOperand(1)->getType()->isFloatingPoint() &&
1538           I.getType() == I.getOperand(1)->getType()) {
1539         SDOperand Tmp = getValue(I.getOperand(1));
1540         setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
1541         return;
1542       }
1543     } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) {
1544       if (I.getNumOperands() == 2 &&   // Basic sanity checks.
1545           I.getOperand(1)->getType()->isFloatingPoint() &&
1546           I.getType() == I.getOperand(1)->getType()) {
1547         SDOperand Tmp = getValue(I.getOperand(1));
1548         setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
1549         return;
1550       }
1551     } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) {
1552       if (I.getNumOperands() == 2 &&   // Basic sanity checks.
1553           I.getOperand(1)->getType()->isFloatingPoint() &&
1554           I.getType() == I.getOperand(1)->getType()) {
1555         SDOperand Tmp = getValue(I.getOperand(1));
1556         setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
1557         return;
1558       }
1559     }
1560   }
1561  } else if (isa<InlineAsm>(I.getOperand(0))) {
1562    visitInlineAsm(I);
1563    return;
1564  }
1565
1566  SDOperand Callee;
1567  if (!RenameFn)
1568    Callee = getValue(I.getOperand(0));
1569  else
1570    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
1571  std::vector<std::pair<SDOperand, const Type*> > Args;
1572  Args.reserve(I.getNumOperands());
1573  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
1574    Value *Arg = I.getOperand(i);
1575    SDOperand ArgNode = getValue(Arg);
1576    Args.push_back(std::make_pair(ArgNode, Arg->getType()));
1577  }
1578
1579  const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
1580  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
1581
1582  std::pair<SDOperand,SDOperand> Result =
1583    TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), I.getCallingConv(),
1584                    I.isTailCall(), Callee, Args, DAG);
1585  if (I.getType() != Type::VoidTy)
1586    setValue(&I, Result.first);
1587  DAG.setRoot(Result.second);
1588}
1589
1590SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
1591                                        SDOperand &Chain, SDOperand &Flag) const {
1592  SDOperand Val = DAG.getCopyFromReg(Chain, Regs[0], RegVT, Flag);
1593  Chain = Val.getValue(1);
1594  Flag = Val.getValue(2);
1595
1596  // If the result was expanded, copy from the top part.
1597  if (Regs.size() > 1) {
1598    assert(Regs.size() == 2 &&
1599           "Cannot expand to more than 2 elts yet!");
1600    SDOperand Hi = DAG.getCopyFromReg(Chain, Regs[1], RegVT, Flag);
1601    Chain = Hi.getValue(1);
1602    Flag = Hi.getValue(2);
1603    if (DAG.getTargetLoweringInfo().isLittleEndian())
1604      return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Val, Hi);
1605    else
1606      return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Hi, Val);
1607  }
1608
1609  // Otherwise, if the return value was promoted, truncate it to the
1610  // appropriate type.
1611 if (RegVT == ValueVT) 1612 return Val; 1613 1614 if (MVT::isInteger(RegVT)) 1615 return DAG.getNode(ISD::TRUNCATE, ValueVT, Val); 1616 else 1617 return DAG.getNode(ISD::FP_ROUND, ValueVT, Val); 1618} 1619 1620/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the 1621/// specified value into the registers specified by this object. This uses 1622/// Chain/Flag as the input and updates them for the output Chain/Flag. 1623void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG, 1624 SDOperand &Chain, SDOperand &Flag) const { 1625 if (Regs.size() == 1) { 1626 // If there is a single register and the types differ, this must be 1627 // a promotion. 1628 if (RegVT != ValueVT) { 1629 if (MVT::isInteger(RegVT)) 1630 Val = DAG.getNode(ISD::ANY_EXTEND, RegVT, Val); 1631 else 1632 Val = DAG.getNode(ISD::FP_EXTEND, RegVT, Val); 1633 } 1634 Chain = DAG.getCopyToReg(Chain, Regs[0], Val, Flag); 1635 Flag = Chain.getValue(1); 1636 } else { 1637 std::vector<unsigned> R(Regs); 1638 if (!DAG.getTargetLoweringInfo().isLittleEndian()) 1639 std::reverse(R.begin(), R.end()); 1640 1641 for (unsigned i = 0, e = R.size(); i != e; ++i) { 1642 SDOperand Part = DAG.getNode(ISD::EXTRACT_ELEMENT, RegVT, Val, 1643 DAG.getConstant(i, MVT::i32)); 1644 Chain = DAG.getCopyToReg(Chain, R[i], Part, Flag); 1645 Flag = Chain.getValue(1); 1646 } 1647 } 1648} 1649 1650/// AddInlineAsmOperands - Add this value to the specified inlineasm node 1651/// operand list. This adds the code marker and includes the number of 1652/// values added into it. 1653void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG, 1654 std::vector<SDOperand> &Ops) const { 1655 Ops.push_back(DAG.getConstant(Code | (Regs.size() << 3), MVT::i32)); 1656 for (unsigned i = 0, e = Regs.size(); i != e; ++i) 1657 Ops.push_back(DAG.getRegister(Regs[i], RegVT)); 1658} 1659 1660/// isAllocatableRegister - If the specified register is safe to allocate, 1661/// i.e. it isn't a stack pointer or some other special register, return the 1662/// register class for the register. Otherwise, return null. 1663static const TargetRegisterClass * 1664isAllocatableRegister(unsigned Reg, MachineFunction &MF, 1665 const TargetLowering &TLI, const MRegisterInfo *MRI) { 1666 MVT::ValueType FoundVT = MVT::Other; 1667 const TargetRegisterClass *FoundRC = 0; 1668 for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(), 1669 E = MRI->regclass_end(); RCI != E; ++RCI) { 1670 MVT::ValueType ThisVT = MVT::Other; 1671 1672 const TargetRegisterClass *RC = *RCI; 1673 // If none of the the value types for this register class are valid, we 1674 // can't use it. For example, 64-bit reg classes on 32-bit targets. 1675 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end(); 1676 I != E; ++I) { 1677 if (TLI.isTypeLegal(*I)) { 1678 // If we have already found this register in a different register class, 1679 // choose the one with the largest VT specified. For example, on 1680 // PowerPC, we favor f64 register classes over f32. 1681 if (FoundVT == MVT::Other || 1682 MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) { 1683 ThisVT = *I; 1684 break; 1685 } 1686 } 1687 } 1688 1689 if (ThisVT == MVT::Other) continue; 1690 1691 // NOTE: This isn't ideal. In particular, this might allocate the 1692 // frame pointer in functions that need it (due to them not being taken 1693 // out of allocation, because a variable sized allocation hasn't been seen 1694 // yet). This is a slight code pessimization, but should still work. 
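  // For example, on a typical target the stack pointer has a legal integer
  // type but is deliberately left out of every register class's allocation
  // order, so the scan below never matches it and null is returned for it
  // (illustrative; the exact register set is target-dependent).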
1695 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF), 1696 E = RC->allocation_order_end(MF); I != E; ++I) 1697 if (*I == Reg) { 1698 // We found a matching register class. Keep looking at others in case 1699 // we find one with larger registers that this physreg is also in. 1700 FoundRC = RC; 1701 FoundVT = ThisVT; 1702 break; 1703 } 1704 } 1705 return FoundRC; 1706} 1707 1708RegsForValue SelectionDAGLowering:: 1709GetRegistersForValue(const std::string &ConstrCode, 1710 MVT::ValueType VT, bool isOutReg, bool isInReg, 1711 std::set<unsigned> &OutputRegs, 1712 std::set<unsigned> &InputRegs) { 1713 std::pair<unsigned, const TargetRegisterClass*> PhysReg = 1714 TLI.getRegForInlineAsmConstraint(ConstrCode, VT); 1715 std::vector<unsigned> Regs; 1716 1717 unsigned NumRegs = VT != MVT::Other ? TLI.getNumElements(VT) : 1; 1718 MVT::ValueType RegVT; 1719 MVT::ValueType ValueVT = VT; 1720 1721 if (PhysReg.first) { 1722 if (VT == MVT::Other) 1723 ValueVT = *PhysReg.second->vt_begin(); 1724 RegVT = VT; 1725 1726 // This is a explicit reference to a physical register. 1727 Regs.push_back(PhysReg.first); 1728 1729 // If this is an expanded reference, add the rest of the regs to Regs. 1730 if (NumRegs != 1) { 1731 RegVT = *PhysReg.second->vt_begin(); 1732 TargetRegisterClass::iterator I = PhysReg.second->begin(); 1733 TargetRegisterClass::iterator E = PhysReg.second->end(); 1734 for (; *I != PhysReg.first; ++I) 1735 assert(I != E && "Didn't find reg!"); 1736 1737 // Already added the first reg. 1738 --NumRegs; ++I; 1739 for (; NumRegs; --NumRegs, ++I) { 1740 assert(I != E && "Ran out of registers to allocate!"); 1741 Regs.push_back(*I); 1742 } 1743 } 1744 return RegsForValue(Regs, RegVT, ValueVT); 1745 } 1746 1747 // This is a reference to a register class. Allocate NumRegs consecutive, 1748 // available, registers from the class. 1749 std::vector<unsigned> RegClassRegs = 1750 TLI.getRegClassForInlineAsmConstraint(ConstrCode, VT); 1751 1752 const MRegisterInfo *MRI = DAG.getTarget().getRegisterInfo(); 1753 MachineFunction &MF = *CurMBB->getParent(); 1754 unsigned NumAllocated = 0; 1755 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) { 1756 unsigned Reg = RegClassRegs[i]; 1757 // See if this register is available. 1758 if ((isOutReg && OutputRegs.count(Reg)) || // Already used. 1759 (isInReg && InputRegs.count(Reg))) { // Already used. 1760 // Make sure we find consecutive registers. 1761 NumAllocated = 0; 1762 continue; 1763 } 1764 1765 // Check to see if this register is allocatable (i.e. don't give out the 1766 // stack pointer). 1767 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, MRI); 1768 if (!RC) { 1769 // Make sure we find consecutive registers. 1770 NumAllocated = 0; 1771 continue; 1772 } 1773 1774 // Okay, this register is good, we can use it. 1775 ++NumAllocated; 1776 1777 // If we allocated enough consecutive 1778 if (NumAllocated == NumRegs) { 1779 unsigned RegStart = (i-NumAllocated)+1; 1780 unsigned RegEnd = i+1; 1781 // Mark all of the allocated registers used. 1782 for (unsigned i = RegStart; i != RegEnd; ++i) { 1783 unsigned Reg = RegClassRegs[i]; 1784 Regs.push_back(Reg); 1785 if (isOutReg) OutputRegs.insert(Reg); // Mark reg used. 1786 if (isInReg) InputRegs.insert(Reg); // Mark reg used. 1787 } 1788 1789 return RegsForValue(Regs, *RC->vt_begin(), VT); 1790 } 1791 } 1792 1793 // Otherwise, we couldn't allocate enough registers for this. 
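  // Illustrative example of the scan above: with RegClassRegs = {R0, R1, R2,
  // R3} (hypothetical names), NumRegs = 2, and R1 already in OutputRegs, the
  // loop takes R0, resets when it reaches R1, and finally hands back
  // {R2, R3}; if no run of NumRegs free consecutive registers exists, we fall
  // through to the failure return below.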
1794 return RegsForValue(); 1795} 1796 1797 1798/// visitInlineAsm - Handle a call to an InlineAsm object. 1799/// 1800void SelectionDAGLowering::visitInlineAsm(CallInst &I) { 1801 InlineAsm *IA = cast<InlineAsm>(I.getOperand(0)); 1802 1803 SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), 1804 MVT::Other); 1805 1806 // Note, we treat inline asms both with and without side-effects as the same. 1807 // If an inline asm doesn't have side effects and doesn't access memory, we 1808 // could not choose to not chain it. 1809 bool hasSideEffects = IA->hasSideEffects(); 1810 1811 std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints(); 1812 std::vector<MVT::ValueType> ConstraintVTs; 1813 1814 /// AsmNodeOperands - A list of pairs. The first element is a register, the 1815 /// second is a bitfield where bit #0 is set if it is a use and bit #1 is set 1816 /// if it is a def of that register. 1817 std::vector<SDOperand> AsmNodeOperands; 1818 AsmNodeOperands.push_back(SDOperand()); // reserve space for input chain 1819 AsmNodeOperands.push_back(AsmStr); 1820 1821 SDOperand Chain = getRoot(); 1822 SDOperand Flag; 1823 1824 // We fully assign registers here at isel time. This is not optimal, but 1825 // should work. For register classes that correspond to LLVM classes, we 1826 // could let the LLVM RA do its thing, but we currently don't. Do a prepass 1827 // over the constraints, collecting fixed registers that we know we can't use. 1828 std::set<unsigned> OutputRegs, InputRegs; 1829 unsigned OpNum = 1; 1830 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) { 1831 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!"); 1832 std::string &ConstraintCode = Constraints[i].Codes[0]; 1833 1834 MVT::ValueType OpVT; 1835 1836 // Compute the value type for each operand and add it to ConstraintVTs. 1837 switch (Constraints[i].Type) { 1838 case InlineAsm::isOutput: 1839 if (!Constraints[i].isIndirectOutput) { 1840 assert(I.getType() != Type::VoidTy && "Bad inline asm!"); 1841 OpVT = TLI.getValueType(I.getType()); 1842 } else { 1843 const Type *OpTy = I.getOperand(OpNum)->getType(); 1844 OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType()); 1845 OpNum++; // Consumes a call operand. 1846 } 1847 break; 1848 case InlineAsm::isInput: 1849 OpVT = TLI.getValueType(I.getOperand(OpNum)->getType()); 1850 OpNum++; // Consumes a call operand. 1851 break; 1852 case InlineAsm::isClobber: 1853 OpVT = MVT::Other; 1854 break; 1855 } 1856 1857 ConstraintVTs.push_back(OpVT); 1858 1859 if (TLI.getRegForInlineAsmConstraint(ConstraintCode, OpVT).first == 0) 1860 continue; // Not assigned a fixed reg. 1861 1862 // Build a list of regs that this operand uses. This always has a single 1863 // element for promoted/expanded operands. 1864 RegsForValue Regs = GetRegistersForValue(ConstraintCode, OpVT, 1865 false, false, 1866 OutputRegs, InputRegs); 1867 1868 switch (Constraints[i].Type) { 1869 case InlineAsm::isOutput: 1870 // We can't assign any other output to this register. 1871 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 1872 // If this is an early-clobber output, it cannot be assigned to the same 1873 // value as the input reg. 1874 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput) 1875 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 1876 break; 1877 case InlineAsm::isInput: 1878 // We can't assign any other input to this register. 
1879 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 1880 break; 1881 case InlineAsm::isClobber: 1882 // Clobbered regs cannot be used as inputs or outputs. 1883 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 1884 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end()); 1885 break; 1886 } 1887 } 1888 1889 // Loop over all of the inputs, copying the operand values into the 1890 // appropriate registers and processing the output regs. 1891 RegsForValue RetValRegs; 1892 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit; 1893 OpNum = 1; 1894 1895 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) { 1896 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!"); 1897 std::string &ConstraintCode = Constraints[i].Codes[0]; 1898 1899 switch (Constraints[i].Type) { 1900 case InlineAsm::isOutput: { 1901 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass; 1902 if (ConstraintCode.size() == 1) // not a physreg name. 1903 CTy = TLI.getConstraintType(ConstraintCode[0]); 1904 1905 if (CTy == TargetLowering::C_Memory) { 1906 // Memory output. 1907 SDOperand InOperandVal = getValue(I.getOperand(OpNum)); 1908 1909 // Check that the operand (the address to store to) isn't a float. 1910 if (!MVT::isInteger(InOperandVal.getValueType())) 1911 assert(0 && "MATCH FAIL!"); 1912 1913 if (!Constraints[i].isIndirectOutput) 1914 assert(0 && "MATCH FAIL!"); 1915 1916 OpNum++; // Consumes a call operand. 1917 1918 // Extend/truncate to the right pointer type if needed. 1919 MVT::ValueType PtrType = TLI.getPointerTy(); 1920 if (InOperandVal.getValueType() < PtrType) 1921 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal); 1922 else if (InOperandVal.getValueType() > PtrType) 1923 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal); 1924 1925 // Add information to the INLINEASM node to know about this output. 1926 unsigned ResOpType = 4/*MEM*/ | (1 << 3); 1927 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32)); 1928 AsmNodeOperands.push_back(InOperandVal); 1929 break; 1930 } 1931 1932 // Otherwise, this is a register output. 1933 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!"); 1934 1935 // If this is an early-clobber output, or if there is an input 1936 // constraint that matches this, we need to reserve the input register 1937 // so no other inputs allocate to it. 1938 bool UsesInputRegister = false; 1939 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput) 1940 UsesInputRegister = true; 1941 1942 // Copy the output from the appropriate register. Find a register that 1943 // we can use. 1944 RegsForValue Regs = 1945 GetRegistersForValue(ConstraintCode, ConstraintVTs[i], 1946 true, UsesInputRegister, 1947 OutputRegs, InputRegs); 1948 assert(!Regs.Regs.empty() && "Couldn't allocate output reg!"); 1949 1950 if (!Constraints[i].isIndirectOutput) { 1951 assert(RetValRegs.Regs.empty() && 1952 "Cannot have multiple output constraints yet!"); 1953 assert(I.getType() != Type::VoidTy && "Bad inline asm!"); 1954 RetValRegs = Regs; 1955 } else { 1956 IndirectStoresToEmit.push_back(std::make_pair(Regs, 1957 I.getOperand(OpNum))); 1958 OpNum++; // Consumes a call operand. 1959 } 1960 1961 // Add information to the INLINEASM node to know that this register is 1962 // set. 
1963 Regs.AddInlineAsmOperands(2 /*REGDEF*/, DAG, AsmNodeOperands); 1964 break; 1965 } 1966 case InlineAsm::isInput: { 1967 SDOperand InOperandVal = getValue(I.getOperand(OpNum)); 1968 OpNum++; // Consumes a call operand. 1969 1970 if (isdigit(ConstraintCode[0])) { // Matching constraint? 1971 // If this is required to match an output register we have already set, 1972 // just use its register. 1973 unsigned OperandNo = atoi(ConstraintCode.c_str()); 1974 1975 // Scan until we find the definition we already emitted of this operand. 1976 // When we find it, create a RegsForValue operand. 1977 unsigned CurOp = 2; // The first operand. 1978 for (; OperandNo; --OperandNo) { 1979 // Advance to the next operand. 1980 unsigned NumOps = 1981 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue(); 1982 assert((NumOps & 7) == 2 /*REGDEF*/ && 1983 "Skipped past definitions?"); 1984 CurOp += (NumOps>>3)+1; 1985 } 1986 1987 unsigned NumOps = 1988 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue(); 1989 assert((NumOps & 7) == 2 /*REGDEF*/ && 1990 "Skipped past definitions?"); 1991 1992 // Add NumOps>>3 registers to MatchedRegs. 1993 RegsForValue MatchedRegs; 1994 MatchedRegs.ValueVT = InOperandVal.getValueType(); 1995 MatchedRegs.RegVT = AsmNodeOperands[CurOp+1].getValueType(); 1996 for (unsigned i = 0, e = NumOps>>3; i != e; ++i) { 1997 unsigned Reg=cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg(); 1998 MatchedRegs.Regs.push_back(Reg); 1999 } 2000 2001 // Use the produced MatchedRegs object to 2002 MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag); 2003 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands); 2004 break; 2005 } 2006 2007 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass; 2008 if (ConstraintCode.size() == 1) // not a physreg name. 2009 CTy = TLI.getConstraintType(ConstraintCode[0]); 2010 2011 if (CTy == TargetLowering::C_Other) { 2012 if (!TLI.isOperandValidForConstraint(InOperandVal, ConstraintCode[0])) 2013 assert(0 && "MATCH FAIL!"); 2014 2015 // Add information to the INLINEASM node to know about this input. 2016 unsigned ResOpType = 3 /*IMM*/ | (1 << 3); 2017 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32)); 2018 AsmNodeOperands.push_back(InOperandVal); 2019 break; 2020 } else if (CTy == TargetLowering::C_Memory) { 2021 // Memory input. 2022 2023 // Check that the operand isn't a float. 2024 if (!MVT::isInteger(InOperandVal.getValueType())) 2025 assert(0 && "MATCH FAIL!"); 2026 2027 // Extend/truncate to the right pointer type if needed. 2028 MVT::ValueType PtrType = TLI.getPointerTy(); 2029 if (InOperandVal.getValueType() < PtrType) 2030 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal); 2031 else if (InOperandVal.getValueType() > PtrType) 2032 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal); 2033 2034 // Add information to the INLINEASM node to know about this input. 2035 unsigned ResOpType = 4/*MEM*/ | (1 << 3); 2036 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32)); 2037 AsmNodeOperands.push_back(InOperandVal); 2038 break; 2039 } 2040 2041 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!"); 2042 2043 // Copy the input into the appropriate registers. 2044 RegsForValue InRegs = 2045 GetRegistersForValue(ConstraintCode, ConstraintVTs[i], 2046 false, true, OutputRegs, InputRegs); 2047 // FIXME: should be match fail. 
2048 assert(!InRegs.Regs.empty() && "Couldn't allocate input reg!"); 2049 2050 InRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag); 2051 2052 InRegs.AddInlineAsmOperands(1/*REGUSE*/, DAG, AsmNodeOperands); 2053 break; 2054 } 2055 case InlineAsm::isClobber: { 2056 RegsForValue ClobberedRegs = 2057 GetRegistersForValue(ConstraintCode, MVT::Other, false, false, 2058 OutputRegs, InputRegs); 2059 // Add the clobbered value to the operand list, so that the register 2060 // allocator is aware that the physreg got clobbered. 2061 if (!ClobberedRegs.Regs.empty()) 2062 ClobberedRegs.AddInlineAsmOperands(2/*REGDEF*/, DAG, AsmNodeOperands); 2063 break; 2064 } 2065 } 2066 } 2067 2068 // Finish up input operands. 2069 AsmNodeOperands[0] = Chain; 2070 if (Flag.Val) AsmNodeOperands.push_back(Flag); 2071 2072 std::vector<MVT::ValueType> VTs; 2073 VTs.push_back(MVT::Other); 2074 VTs.push_back(MVT::Flag); 2075 Chain = DAG.getNode(ISD::INLINEASM, VTs, AsmNodeOperands); 2076 Flag = Chain.getValue(1); 2077 2078 // If this asm returns a register value, copy the result from that register 2079 // and set it as the value of the call. 2080 if (!RetValRegs.Regs.empty()) 2081 setValue(&I, RetValRegs.getCopyFromRegs(DAG, Chain, Flag)); 2082 2083 std::vector<std::pair<SDOperand, Value*> > StoresToEmit; 2084 2085 // Process indirect outputs, first output all of the flagged copies out of 2086 // physregs. 2087 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) { 2088 RegsForValue &OutRegs = IndirectStoresToEmit[i].first; 2089 Value *Ptr = IndirectStoresToEmit[i].second; 2090 SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, Flag); 2091 StoresToEmit.push_back(std::make_pair(OutVal, Ptr)); 2092 } 2093 2094 // Emit the non-flagged stores from the physregs. 2095 std::vector<SDOperand> OutChains; 2096 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) 2097 OutChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain, 2098 StoresToEmit[i].first, 2099 getValue(StoresToEmit[i].second), 2100 DAG.getSrcValue(StoresToEmit[i].second))); 2101 if (!OutChains.empty()) 2102 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains); 2103 DAG.setRoot(Chain); 2104} 2105 2106 2107void SelectionDAGLowering::visitMalloc(MallocInst &I) { 2108 SDOperand Src = getValue(I.getOperand(0)); 2109 2110 MVT::ValueType IntPtr = TLI.getPointerTy(); 2111 2112 if (IntPtr < Src.getValueType()) 2113 Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src); 2114 else if (IntPtr > Src.getValueType()) 2115 Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src); 2116 2117 // Scale the source by the type size. 
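  // Illustrative example: for "%P = malloc int, uint %N" on a 32-bit target
  // (hypothetical IR), the element size computed below is 4, so this emits
  // Src = %N * 4 and then lowers a call to malloc with that byte count.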
2118 uint64_t ElementSize = TD.getTypeSize(I.getType()->getElementType()); 2119 Src = DAG.getNode(ISD::MUL, Src.getValueType(), 2120 Src, getIntPtrConstant(ElementSize)); 2121 2122 std::vector<std::pair<SDOperand, const Type*> > Args; 2123 Args.push_back(std::make_pair(Src, TLI.getTargetData().getIntPtrType())); 2124 2125 std::pair<SDOperand,SDOperand> Result = 2126 TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true, 2127 DAG.getExternalSymbol("malloc", IntPtr), 2128 Args, DAG); 2129 setValue(&I, Result.first); // Pointers always fit in registers 2130 DAG.setRoot(Result.second); 2131} 2132 2133void SelectionDAGLowering::visitFree(FreeInst &I) { 2134 std::vector<std::pair<SDOperand, const Type*> > Args; 2135 Args.push_back(std::make_pair(getValue(I.getOperand(0)), 2136 TLI.getTargetData().getIntPtrType())); 2137 MVT::ValueType IntPtr = TLI.getPointerTy(); 2138 std::pair<SDOperand,SDOperand> Result = 2139 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true, 2140 DAG.getExternalSymbol("free", IntPtr), Args, DAG); 2141 DAG.setRoot(Result.second); 2142} 2143 2144// InsertAtEndOfBasicBlock - This method should be implemented by targets that 2145// mark instructions with the 'usesCustomDAGSchedInserter' flag. These 2146// instructions are special in various ways, which require special support to 2147// insert. The specified MachineInstr is created but not inserted into any 2148// basic blocks, and the scheduler passes ownership of it to this method. 2149MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, 2150 MachineBasicBlock *MBB) { 2151 std::cerr << "If a target marks an instruction with " 2152 "'usesCustomDAGSchedInserter', it must implement " 2153 "TargetLowering::InsertAtEndOfBasicBlock!\n"; 2154 abort(); 2155 return 0; 2156} 2157 2158void SelectionDAGLowering::visitVAStart(CallInst &I) { 2159 DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(), 2160 getValue(I.getOperand(1)), 2161 DAG.getSrcValue(I.getOperand(1)))); 2162} 2163 2164void SelectionDAGLowering::visitVAArg(VAArgInst &I) { 2165 SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(), 2166 getValue(I.getOperand(0)), 2167 DAG.getSrcValue(I.getOperand(0))); 2168 setValue(&I, V); 2169 DAG.setRoot(V.getValue(1)); 2170} 2171 2172void SelectionDAGLowering::visitVAEnd(CallInst &I) { 2173 DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(), 2174 getValue(I.getOperand(1)), 2175 DAG.getSrcValue(I.getOperand(1)))); 2176} 2177 2178void SelectionDAGLowering::visitVACopy(CallInst &I) { 2179 DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(), 2180 getValue(I.getOperand(1)), 2181 getValue(I.getOperand(2)), 2182 DAG.getSrcValue(I.getOperand(1)), 2183 DAG.getSrcValue(I.getOperand(2)))); 2184} 2185 2186// It is always conservatively correct for llvm.returnaddress and 2187// llvm.frameaddress to return 0. 
2188std::pair<SDOperand, SDOperand>
2189TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
2190                                        unsigned Depth, SelectionDAG &DAG) {
2191  return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
2192}
2193
2194SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
2195  assert(0 && "LowerOperation not implemented for this target!");
2196  abort();
2197  return SDOperand();
2198}
2199
2200SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op,
2201                                                 SelectionDAG &DAG) {
2202  assert(0 && "CustomPromoteOperation not implemented for this target!");
2203  abort();
2204  return SDOperand();
2205}
2206
2207void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
2208  unsigned Depth = (unsigned)cast<ConstantUInt>(I.getOperand(1))->getValue();
2209  std::pair<SDOperand,SDOperand> Result =
2210    TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
2211  setValue(&I, Result.first);
2212  DAG.setRoot(Result.second);
2213}
2214
2215/// getMemsetValue - Build the memset fill value, replicated (splatted) into
2216/// every byte of the value type VT.
2217static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
2218                                SelectionDAG &DAG) {
2219  MVT::ValueType CurVT = VT;
2220  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
2221    uint64_t Val = C->getValue() & 255;
2222    unsigned Shift = 8;
2223    while (CurVT != MVT::i8) {
2224      Val = (Val << Shift) | Val;
2225      Shift <<= 1;
2226      CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
2227    }
2228    return DAG.getConstant(Val, VT);
2229  } else {
2230    Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value);
2231    unsigned Shift = 8;
2232    while (CurVT != MVT::i8) {
2233      Value =
2234        DAG.getNode(ISD::OR, VT,
2235                    DAG.getNode(ISD::SHL, VT, Value,
2236                                DAG.getConstant(Shift, MVT::i8)), Value);
2237      Shift <<= 1;
2238      CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
2239    }
2240
2241    return Value;
2242  }
2243}
2244
2245/// getMemsetStringVal - Similar to getMemsetValue, except this is only used
2246/// when a memcpy whose source is a constant string pointer is expanded into a
2247/// series of stores of constant values read directly from that string.
2248static SDOperand getMemsetStringVal(MVT::ValueType VT,
2249                                    SelectionDAG &DAG, TargetLowering &TLI,
2250                                    std::string &Str, unsigned Offset) {
2251  MVT::ValueType CurVT = VT;
2252  uint64_t Val = 0;
2253  unsigned MSB = getSizeInBits(VT) / 8;
2254  if (TLI.isLittleEndian())
2255    Offset = Offset + MSB - 1;
2256  for (unsigned i = 0; i != MSB; ++i) {
2257    Val = (Val << 8) | Str[Offset];
2258    Offset += TLI.isLittleEndian() ? -1 : 1;
2259  }
2260  return DAG.getConstant(Val, VT);
2261}
2262
2263/// getMemBasePlusOffset - Returns a node computing Base + Offset, using the base pointer's value type.
2264static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset,
2265                                      SelectionDAG &DAG, TargetLowering &TLI) {
2266  MVT::ValueType VT = Base.getValueType();
2267  return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT));
2268}
2269
2270/// MeetsMaxMemopRequirement - Determines if the number of memory ops required
2271/// to replace the memset / memcpy is below the threshold. It also returns the
2272/// types of the sequence of memory ops to perform memset / memcpy.
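///
/// Illustrative example: with Size = 11 and Align = 4 on a target whose
/// largest legal integer type is i32, this produces
/// MemOps = { i32, i32, i16, i8 } (4 + 4 + 2 + 1 bytes), which is accepted
/// only when Limit >= 4.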
2273static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps, 2274 unsigned Limit, uint64_t Size, 2275 unsigned Align, TargetLowering &TLI) { 2276 MVT::ValueType VT; 2277 2278 if (TLI.allowsUnalignedMemoryAccesses()) { 2279 VT = MVT::i64; 2280 } else { 2281 switch (Align & 7) { 2282 case 0: 2283 VT = MVT::i64; 2284 break; 2285 case 4: 2286 VT = MVT::i32; 2287 break; 2288 case 2: 2289 VT = MVT::i16; 2290 break; 2291 default: 2292 VT = MVT::i8; 2293 break; 2294 } 2295 } 2296 2297 MVT::ValueType LVT = MVT::i64; 2298 while (!TLI.isTypeLegal(LVT)) 2299 LVT = (MVT::ValueType)((unsigned)LVT - 1); 2300 assert(MVT::isInteger(LVT)); 2301 2302 if (VT > LVT) 2303 VT = LVT; 2304 2305 unsigned NumMemOps = 0; 2306 while (Size != 0) { 2307 unsigned VTSize = getSizeInBits(VT) / 8; 2308 while (VTSize > Size) { 2309 VT = (MVT::ValueType)((unsigned)VT - 1); 2310 VTSize >>= 1; 2311 } 2312 assert(MVT::isInteger(VT)); 2313 2314 if (++NumMemOps > Limit) 2315 return false; 2316 MemOps.push_back(VT); 2317 Size -= VTSize; 2318 } 2319 2320 return true; 2321} 2322 2323void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) { 2324 SDOperand Op1 = getValue(I.getOperand(1)); 2325 SDOperand Op2 = getValue(I.getOperand(2)); 2326 SDOperand Op3 = getValue(I.getOperand(3)); 2327 SDOperand Op4 = getValue(I.getOperand(4)); 2328 unsigned Align = (unsigned)cast<ConstantSDNode>(Op4)->getValue(); 2329 if (Align == 0) Align = 1; 2330 2331 if (ConstantSDNode *Size = dyn_cast<ConstantSDNode>(Op3)) { 2332 std::vector<MVT::ValueType> MemOps; 2333 2334 // Expand memset / memcpy to a series of load / store ops 2335 // if the size operand falls below a certain threshold. 2336 std::vector<SDOperand> OutChains; 2337 switch (Op) { 2338 default: break; // Do nothing for now. 
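    // Illustrative note for the expansion below: a constant fill byte is
    // splatted across each store type by getMemsetValue, e.g. the byte 0xAB
    // stored as MVT::i32 becomes the constant 0xABABABAB, and one such store
    // is emitted per entry in MemOps.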
2339 case ISD::MEMSET: { 2340 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(), 2341 Size->getValue(), Align, TLI)) { 2342 unsigned NumMemOps = MemOps.size(); 2343 unsigned Offset = 0; 2344 for (unsigned i = 0; i < NumMemOps; i++) { 2345 MVT::ValueType VT = MemOps[i]; 2346 unsigned VTSize = getSizeInBits(VT) / 8; 2347 SDOperand Value = getMemsetValue(Op2, VT, DAG); 2348 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, getRoot(), 2349 Value, 2350 getMemBasePlusOffset(Op1, Offset, DAG, TLI), 2351 DAG.getSrcValue(I.getOperand(1), Offset)); 2352 OutChains.push_back(Store); 2353 Offset += VTSize; 2354 } 2355 } 2356 break; 2357 } 2358 case ISD::MEMCPY: { 2359 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemcpy(), 2360 Size->getValue(), Align, TLI)) { 2361 unsigned NumMemOps = MemOps.size(); 2362 unsigned SrcOff = 0, DstOff = 0, SrcDelta = 0; 2363 GlobalAddressSDNode *G = NULL; 2364 std::string Str; 2365 bool CopyFromStr = false; 2366 2367 if (Op2.getOpcode() == ISD::GlobalAddress) 2368 G = cast<GlobalAddressSDNode>(Op2); 2369 else if (Op2.getOpcode() == ISD::ADD && 2370 Op2.getOperand(0).getOpcode() == ISD::GlobalAddress && 2371 Op2.getOperand(1).getOpcode() == ISD::Constant) { 2372 G = cast<GlobalAddressSDNode>(Op2.getOperand(0)); 2373 SrcDelta = cast<ConstantSDNode>(Op2.getOperand(1))->getValue(); 2374 } 2375 if (G) { 2376 GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal()); 2377 if (GV) { 2378 Str = GV->getStringValue(false); 2379 if (!Str.empty()) { 2380 CopyFromStr = true; 2381 SrcOff += SrcDelta; 2382 } 2383 } 2384 } 2385 2386 for (unsigned i = 0; i < NumMemOps; i++) { 2387 MVT::ValueType VT = MemOps[i]; 2388 unsigned VTSize = getSizeInBits(VT) / 8; 2389 SDOperand Value, Chain, Store; 2390 2391 if (CopyFromStr) { 2392 Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff); 2393 Chain = getRoot(); 2394 Store = 2395 DAG.getNode(ISD::STORE, MVT::Other, Chain, Value, 2396 getMemBasePlusOffset(Op1, DstOff, DAG, TLI), 2397 DAG.getSrcValue(I.getOperand(1), DstOff)); 2398 } else { 2399 Value = DAG.getLoad(VT, getRoot(), 2400 getMemBasePlusOffset(Op2, SrcOff, DAG, TLI), 2401 DAG.getSrcValue(I.getOperand(2), SrcOff)); 2402 Chain = Value.getValue(1); 2403 Store = 2404 DAG.getNode(ISD::STORE, MVT::Other, Chain, Value, 2405 getMemBasePlusOffset(Op1, DstOff, DAG, TLI), 2406 DAG.getSrcValue(I.getOperand(1), DstOff)); 2407 } 2408 OutChains.push_back(Store); 2409 SrcOff += VTSize; 2410 DstOff += VTSize; 2411 } 2412 } 2413 break; 2414 } 2415 } 2416 2417 if (!OutChains.empty()) { 2418 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains)); 2419 return; 2420 } 2421 } 2422 2423 std::vector<SDOperand> Ops; 2424 Ops.push_back(getRoot()); 2425 Ops.push_back(Op1); 2426 Ops.push_back(Op2); 2427 Ops.push_back(Op3); 2428 Ops.push_back(Op4); 2429 DAG.setRoot(DAG.getNode(Op, MVT::Other, Ops)); 2430} 2431 2432//===----------------------------------------------------------------------===// 2433// SelectionDAGISel code 2434//===----------------------------------------------------------------------===// 2435 2436unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) { 2437 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT)); 2438} 2439 2440void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const { 2441 // FIXME: we only modify the CFG to split critical edges. This 2442 // updates dom and loop info. 2443} 2444 2445 2446/// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset, 2447/// casting to the type of GEPI. 
2448static Value *InsertGEPComputeCode(Value *&V, BasicBlock *BB, Instruction *GEPI, 2449 Value *Ptr, Value *PtrOffset) { 2450 if (V) return V; // Already computed. 2451 2452 BasicBlock::iterator InsertPt; 2453 if (BB == GEPI->getParent()) { 2454 // If insert into the GEP's block, insert right after the GEP. 2455 InsertPt = GEPI; 2456 ++InsertPt; 2457 } else { 2458 // Otherwise, insert at the top of BB, after any PHI nodes 2459 InsertPt = BB->begin(); 2460 while (isa<PHINode>(InsertPt)) ++InsertPt; 2461 } 2462 2463 // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into 2464 // BB so that there is only one value live across basic blocks (the cast 2465 // operand). 2466 if (CastInst *CI = dyn_cast<CastInst>(Ptr)) 2467 if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType())) 2468 Ptr = new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt); 2469 2470 // Add the offset, cast it to the right type. 2471 Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt); 2472 Ptr = new CastInst(Ptr, GEPI->getType(), "", InsertPt); 2473 return V = Ptr; 2474} 2475 2476 2477/// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction 2478/// selection, we want to be a bit careful about some things. In particular, if 2479/// we have a GEP instruction that is used in a different block than it is 2480/// defined, the addressing expression of the GEP cannot be folded into loads or 2481/// stores that use it. In this case, decompose the GEP and move constant 2482/// indices into blocks that use it. 2483static void OptimizeGEPExpression(GetElementPtrInst *GEPI, 2484 const TargetData &TD) { 2485 // If this GEP is only used inside the block it is defined in, there is no 2486 // need to rewrite it. 2487 bool isUsedOutsideDefBB = false; 2488 BasicBlock *DefBB = GEPI->getParent(); 2489 for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end(); 2490 UI != E; ++UI) { 2491 if (cast<Instruction>(*UI)->getParent() != DefBB) { 2492 isUsedOutsideDefBB = true; 2493 break; 2494 } 2495 } 2496 if (!isUsedOutsideDefBB) return; 2497 2498 // If this GEP has no non-zero constant indices, there is nothing we can do, 2499 // ignore it. 2500 bool hasConstantIndex = false; 2501 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1, 2502 E = GEPI->op_end(); OI != E; ++OI) { 2503 if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI)) 2504 if (CI->getRawValue()) { 2505 hasConstantIndex = true; 2506 break; 2507 } 2508 } 2509 // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses. 2510 if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0))) return; 2511 2512 // Otherwise, decompose the GEP instruction into multiplies and adds. Sum the 2513 // constant offset (which we now know is non-zero) and deal with it later. 2514 uint64_t ConstantOffset = 0; 2515 const Type *UIntPtrTy = TD.getIntPtrType(); 2516 Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI); 2517 const Type *Ty = GEPI->getOperand(0)->getType(); 2518 2519 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1, 2520 E = GEPI->op_end(); OI != E; ++OI) { 2521 Value *Idx = *OI; 2522 if (const StructType *StTy = dyn_cast<StructType>(Ty)) { 2523 unsigned Field = cast<ConstantUInt>(Idx)->getValue(); 2524 if (Field) 2525 ConstantOffset += TD.getStructLayout(StTy)->MemberOffsets[Field]; 2526 Ty = StTy->getElementType(Field); 2527 } else { 2528 Ty = cast<SequentialType>(Ty)->getElementType(); 2529 2530 // Handle constant subscripts. 
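      // Illustrative example: in "getelementptr [10 x int]* %A, int 0, int 3"
      // (hypothetical IR, assuming a 4-byte int), the trailing constant index
      // contributes 3 * 4 = 12 to ConstantOffset here rather than producing
      // the multiply/add sequence emitted for variable indices below.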
2531 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) { 2532 if (CI->getRawValue() == 0) continue; 2533 2534 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI)) 2535 ConstantOffset += (int64_t)TD.getTypeSize(Ty)*CSI->getValue(); 2536 else 2537 ConstantOffset+=TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue(); 2538 continue; 2539 } 2540 2541 // Ptr = Ptr + Idx * ElementSize; 2542 2543 // Cast Idx to UIntPtrTy if needed. 2544 Idx = new CastInst(Idx, UIntPtrTy, "", GEPI); 2545 2546 uint64_t ElementSize = TD.getTypeSize(Ty); 2547 // Mask off bits that should not be set. 2548 ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits()); 2549 Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize); 2550 2551 // Multiply by the element size and add to the base. 2552 Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI); 2553 Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI); 2554 } 2555 } 2556 2557 // Make sure that the offset fits in uintptr_t. 2558 ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits()); 2559 Constant *PtrOffset = ConstantUInt::get(UIntPtrTy, ConstantOffset); 2560 2561 // Okay, we have now emitted all of the variable index parts to the BB that 2562 // the GEP is defined in. Loop over all of the using instructions, inserting 2563 // an "add Ptr, ConstantOffset" into each block that uses it and update the 2564 // instruction to use the newly computed value, making GEPI dead. When the 2565 // user is a load or store instruction address, we emit the add into the user 2566 // block, otherwise we use a canonical version right next to the gep (these 2567 // won't be foldable as addresses, so we might as well share the computation). 2568 2569 std::map<BasicBlock*,Value*> InsertedExprs; 2570 while (!GEPI->use_empty()) { 2571 Instruction *User = cast<Instruction>(GEPI->use_back()); 2572 2573 // If this use is not foldable into the addressing mode, use a version 2574 // emitted in the GEP block. 2575 Value *NewVal; 2576 if (!isa<LoadInst>(User) && 2577 (!isa<StoreInst>(User) || User->getOperand(0) == GEPI)) { 2578 NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI, 2579 Ptr, PtrOffset); 2580 } else { 2581 // Otherwise, insert the code in the User's block so it can be folded into 2582 // any users in that block. 2583 NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()], 2584 User->getParent(), GEPI, 2585 Ptr, PtrOffset); 2586 } 2587 User->replaceUsesOfWith(GEPI, NewVal); 2588 } 2589 2590 // Finally, the GEP is dead, remove it. 2591 GEPI->eraseFromParent(); 2592} 2593 2594bool SelectionDAGISel::runOnFunction(Function &Fn) { 2595 MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine()); 2596 RegMap = MF.getSSARegMap(); 2597 DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n"); 2598 2599 // First, split all critical edges for PHI nodes with incoming values that are 2600 // constants, this way the load of the constant into a vreg will not be placed 2601 // into MBBs that are used some other way. 2602 // 2603 // In this pass we also look for GEP instructions that are used across basic 2604 // blocks and rewrites them to improve basic-block-at-a-time selection. 
2605 // 2606 for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) { 2607 PHINode *PN; 2608 BasicBlock::iterator BBI; 2609 for (BBI = BB->begin(); (PN = dyn_cast<PHINode>(BBI)); ++BBI) 2610 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 2611 if (isa<Constant>(PN->getIncomingValue(i))) 2612 SplitCriticalEdge(PN->getIncomingBlock(i), BB); 2613 2614 for (BasicBlock::iterator E = BB->end(); BBI != E; ) 2615 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(BBI++)) 2616 OptimizeGEPExpression(GEPI, TLI.getTargetData()); 2617 } 2618 2619 FunctionLoweringInfo FuncInfo(TLI, Fn, MF); 2620 2621 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) 2622 SelectBasicBlock(I, MF, FuncInfo); 2623 2624 return true; 2625} 2626 2627 2628SDOperand SelectionDAGISel:: 2629CopyValueToVirtualRegister(SelectionDAGLowering &SDL, Value *V, unsigned Reg) { 2630 SDOperand Op = SDL.getValue(V); 2631 assert((Op.getOpcode() != ISD::CopyFromReg || 2632 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) && 2633 "Copy from a reg to the same reg!"); 2634 2635 // If this type is not legal, we must make sure to not create an invalid 2636 // register use. 2637 MVT::ValueType SrcVT = Op.getValueType(); 2638 MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT); 2639 SelectionDAG &DAG = SDL.DAG; 2640 if (SrcVT == DestVT) { 2641 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op); 2642 } else if (SrcVT == MVT::Vector) { 2643 // Handle copies from generic vectors to registers. 2644 MVT::ValueType PTyElementVT, PTyLegalElementVT; 2645 unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(V->getType()), 2646 PTyElementVT, PTyLegalElementVT); 2647 2648 // Insert a VBIT_CONVERT of the input vector to a "N x PTyElementVT" 2649 // MVT::Vector type. 2650 Op = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Op, 2651 DAG.getConstant(NE, MVT::i32), 2652 DAG.getValueType(PTyElementVT)); 2653 2654 // Loop over all of the elements of the resultant vector, 2655 // VEXTRACT_VECTOR_ELT'ing them, converting them to PTyLegalElementVT, then 2656 // copying them into output registers. 2657 std::vector<SDOperand> OutChains; 2658 SDOperand Root = SDL.getRoot(); 2659 for (unsigned i = 0; i != NE; ++i) { 2660 SDOperand Elt = DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, PTyElementVT, 2661 Op, DAG.getConstant(i, MVT::i32)); 2662 if (PTyElementVT == PTyLegalElementVT) { 2663 // Elements are legal. 2664 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt)); 2665 } else if (PTyLegalElementVT > PTyElementVT) { 2666 // Elements are promoted. 2667 if (MVT::isFloatingPoint(PTyLegalElementVT)) 2668 Elt = DAG.getNode(ISD::FP_EXTEND, PTyLegalElementVT, Elt); 2669 else 2670 Elt = DAG.getNode(ISD::ANY_EXTEND, PTyLegalElementVT, Elt); 2671 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt)); 2672 } else { 2673 // Elements are expanded. 2674 // The src value is expanded into multiple registers. 2675 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT, 2676 Elt, DAG.getConstant(0, MVT::i32)); 2677 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT, 2678 Elt, DAG.getConstant(1, MVT::i32)); 2679 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Lo)); 2680 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Hi)); 2681 } 2682 } 2683 return DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains); 2684 } else if (SrcVT < DestVT) { 2685 // The src value is promoted to the register. 
2686 if (MVT::isFloatingPoint(SrcVT)) 2687 Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op); 2688 else 2689 Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op); 2690 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op); 2691 } else { 2692 // The src value is expanded into multiple registers. 2693 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT, 2694 Op, DAG.getConstant(0, MVT::i32)); 2695 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT, 2696 Op, DAG.getConstant(1, MVT::i32)); 2697 Op = DAG.getCopyToReg(SDL.getRoot(), Reg, Lo); 2698 return DAG.getCopyToReg(Op, Reg+1, Hi); 2699 } 2700} 2701 2702void SelectionDAGISel:: 2703LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL, 2704 std::vector<SDOperand> &UnorderedChains) { 2705 // If this is the entry block, emit arguments. 2706 Function &F = *BB->getParent(); 2707 FunctionLoweringInfo &FuncInfo = SDL.FuncInfo; 2708 SDOperand OldRoot = SDL.DAG.getRoot(); 2709 std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG); 2710 2711 unsigned a = 0; 2712 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end(); 2713 AI != E; ++AI, ++a) 2714 if (!AI->use_empty()) { 2715 SDL.setValue(AI, Args[a]); 2716 2717 // If this argument is live outside of the entry block, insert a copy from 2718 // whereever we got it to the vreg that other BB's will reference it as. 2719 if (FuncInfo.ValueMap.count(AI)) { 2720 SDOperand Copy = 2721 CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]); 2722 UnorderedChains.push_back(Copy); 2723 } 2724 } 2725 2726 // Next, if the function has live ins that need to be copied into vregs, 2727 // emit the copies now, into the top of the block. 2728 MachineFunction &MF = SDL.DAG.getMachineFunction(); 2729 if (MF.livein_begin() != MF.livein_end()) { 2730 SSARegMap *RegMap = MF.getSSARegMap(); 2731 const MRegisterInfo &MRI = *MF.getTarget().getRegisterInfo(); 2732 for (MachineFunction::livein_iterator LI = MF.livein_begin(), 2733 E = MF.livein_end(); LI != E; ++LI) 2734 if (LI->second) 2735 MRI.copyRegToReg(*MF.begin(), MF.begin()->end(), LI->second, 2736 LI->first, RegMap->getRegClass(LI->second)); 2737 } 2738 2739 // Finally, if the target has anything special to do, allow it to do so. 2740 EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction()); 2741} 2742 2743 2744void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB, 2745 std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate, 2746 FunctionLoweringInfo &FuncInfo) { 2747 SelectionDAGLowering SDL(DAG, TLI, FuncInfo); 2748 2749 std::vector<SDOperand> UnorderedChains; 2750 2751 // Lower any arguments needed in this block if this is the entry block. 2752 if (LLVMBB == &LLVMBB->getParent()->front()) 2753 LowerArguments(LLVMBB, SDL, UnorderedChains); 2754 2755 BB = FuncInfo.MBBMap[LLVMBB]; 2756 SDL.setCurrentBasicBlock(BB); 2757 2758 // Lower all of the non-terminator instructions. 2759 for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end(); 2760 I != E; ++I) 2761 SDL.visit(*I); 2762 2763 // Ensure that all instructions which are used outside of their defining 2764 // blocks are available as virtual registers. 2765 for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I) 2766 if (!I->use_empty() && !isa<PHINode>(I)) { 2767 std::map<const Value*, unsigned>::iterator VMI =FuncInfo.ValueMap.find(I); 2768 if (VMI != FuncInfo.ValueMap.end()) 2769 UnorderedChains.push_back( 2770 CopyValueToVirtualRegister(SDL, I, VMI->second)); 2771 } 2772 2773 // Handle PHI nodes in successor blocks. 
Emit code into the SelectionDAG to 2774 // ensure constants are generated when needed. Remember the virtual registers 2775 // that need to be added to the Machine PHI nodes as input. We cannot just 2776 // directly add them, because expansion might result in multiple MBB's for one 2777 // BB. As such, the start of the BB might correspond to a different MBB than 2778 // the end. 2779 // 2780 2781 // Emit constants only once even if used by multiple PHI nodes. 2782 std::map<Constant*, unsigned> ConstantsOut; 2783 2784 // Check successor nodes PHI nodes that expect a constant to be available from 2785 // this block. 2786 TerminatorInst *TI = LLVMBB->getTerminator(); 2787 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) { 2788 BasicBlock *SuccBB = TI->getSuccessor(succ); 2789 MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin(); 2790 PHINode *PN; 2791 2792 // At this point we know that there is a 1-1 correspondence between LLVM PHI 2793 // nodes and Machine PHI nodes, but the incoming operands have not been 2794 // emitted yet. 2795 for (BasicBlock::iterator I = SuccBB->begin(); 2796 (PN = dyn_cast<PHINode>(I)); ++I) 2797 if (!PN->use_empty()) { 2798 unsigned Reg; 2799 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB); 2800 if (Constant *C = dyn_cast<Constant>(PHIOp)) { 2801 unsigned &RegOut = ConstantsOut[C]; 2802 if (RegOut == 0) { 2803 RegOut = FuncInfo.CreateRegForValue(C); 2804 UnorderedChains.push_back( 2805 CopyValueToVirtualRegister(SDL, C, RegOut)); 2806 } 2807 Reg = RegOut; 2808 } else { 2809 Reg = FuncInfo.ValueMap[PHIOp]; 2810 if (Reg == 0) { 2811 assert(isa<AllocaInst>(PHIOp) && 2812 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) && 2813 "Didn't codegen value into a register!??"); 2814 Reg = FuncInfo.CreateRegForValue(PHIOp); 2815 UnorderedChains.push_back( 2816 CopyValueToVirtualRegister(SDL, PHIOp, Reg)); 2817 } 2818 } 2819 2820 // Remember that this register needs to added to the machine PHI node as 2821 // the input for this MBB. 2822 MVT::ValueType VT = TLI.getValueType(PN->getType()); 2823 unsigned NumElements; 2824 if (VT != MVT::Vector) 2825 NumElements = TLI.getNumElements(VT); 2826 else { 2827 MVT::ValueType VT1,VT2; 2828 NumElements = 2829 TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()), 2830 VT1, VT2); 2831 } 2832 for (unsigned i = 0, e = NumElements; i != e; ++i) 2833 PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i)); 2834 } 2835 } 2836 ConstantsOut.clear(); 2837 2838 // Turn all of the unordered chains into one factored node. 2839 if (!UnorderedChains.empty()) { 2840 SDOperand Root = SDL.getRoot(); 2841 if (Root.getOpcode() != ISD::EntryToken) { 2842 unsigned i = 0, e = UnorderedChains.size(); 2843 for (; i != e; ++i) { 2844 assert(UnorderedChains[i].Val->getNumOperands() > 1); 2845 if (UnorderedChains[i].Val->getOperand(0) == Root) 2846 break; // Don't add the root if we already indirectly depend on it. 2847 } 2848 2849 if (i == e) 2850 UnorderedChains.push_back(Root); 2851 } 2852 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, UnorderedChains)); 2853 } 2854 2855 // Lower the terminator after the copies are emitted. 2856 SDL.visit(*LLVMBB->getTerminator()); 2857 2858 // Copy over any CaseBlock records that may now exist due to SwitchInst 2859 // lowering. 2860 SwitchCases.clear(); 2861 SwitchCases = SDL.SwitchCases; 2862 2863 // Make sure the root of the DAG is up-to-date. 
2864 DAG.setRoot(SDL.getRoot()); 2865} 2866 2867void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) { 2868 // Run the DAG combiner in pre-legalize mode. 2869 DAG.Combine(false); 2870 2871 DEBUG(std::cerr << "Lowered selection DAG:\n"); 2872 DEBUG(DAG.dump()); 2873 2874 // Second step, hack on the DAG until it only uses operations and types that 2875 // the target supports. 2876 DAG.Legalize(); 2877 2878 DEBUG(std::cerr << "Legalized selection DAG:\n"); 2879 DEBUG(DAG.dump()); 2880 2881 // Run the DAG combiner in post-legalize mode. 2882 DAG.Combine(true); 2883 2884 if (ViewISelDAGs) DAG.viewGraph(); 2885 2886 // Third, instruction select all of the operations to machine code, adding the 2887 // code to the MachineBasicBlock. 2888 InstructionSelectBasicBlock(DAG); 2889 2890 DEBUG(std::cerr << "Selected machine code:\n"); 2891 DEBUG(BB->dump()); 2892} 2893 2894void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF, 2895 FunctionLoweringInfo &FuncInfo) { 2896 std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate; 2897 { 2898 SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>()); 2899 CurDAG = &DAG; 2900 2901 // First step, lower LLVM code to some DAG. This DAG may use operations and 2902 // types that are not supported by the target. 2903 BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo); 2904 2905 // Second step, emit the lowered DAG as machine code. 2906 CodeGenAndEmitDAG(DAG); 2907 } 2908 2909 // Next, now that we know what the last MBB the LLVM BB expanded is, update 2910 // PHI nodes in successors. 2911 if (SwitchCases.empty()) { 2912 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) { 2913 MachineInstr *PHI = PHINodesToUpdate[i].first; 2914 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 2915 "This is not a machine PHI node that we are updating!"); 2916 PHI->addRegOperand(PHINodesToUpdate[i].second); 2917 PHI->addMachineBasicBlockOperand(BB); 2918 } 2919 return; 2920 } 2921 2922 // If we generated any switch lowering information, build and codegen any 2923 // additional DAGs necessary. 2924 for(unsigned i = 0, e = SwitchCases.size(); i != e; ++i) { 2925 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>()); 2926 CurDAG = &SDAG; 2927 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo); 2928 // Set the current basic block to the mbb we wish to insert the code into 2929 BB = SwitchCases[i].ThisBB; 2930 SDL.setCurrentBasicBlock(BB); 2931 // Emit the code 2932 SDL.visitSwitchCase(SwitchCases[i]); 2933 SDAG.setRoot(SDL.getRoot()); 2934 CodeGenAndEmitDAG(SDAG); 2935 // Iterate over the phi nodes, if there is a phi node in a successor of this 2936 // block (for instance, the default block), then add a pair of operands to 2937 // the phi node for this block, as if we were coming from the original 2938 // BB before switch expansion. 
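    // Illustrative example: if the default destination begins with
    // "phi int [ 0, %entry ]" (hypothetical IR) and switch lowering split
    // %entry into a chain of compare blocks, that PHI also needs an operand
    // pair naming the compare block emitted here; the LHSBB/RHSBB test below
    // is what adds it.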
2939 for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) { 2940 MachineInstr *PHI = PHINodesToUpdate[pi].first; 2941 MachineBasicBlock *PHIBB = PHI->getParent(); 2942 assert(PHI->getOpcode() == TargetInstrInfo::PHI && 2943 "This is not a machine PHI node that we are updating!"); 2944 if (PHIBB == SwitchCases[i].LHSBB || PHIBB == SwitchCases[i].RHSBB) { 2945 PHI->addRegOperand(PHINodesToUpdate[pi].second); 2946 PHI->addMachineBasicBlockOperand(BB); 2947 } 2948 } 2949 } 2950} 2951 2952//===----------------------------------------------------------------------===// 2953/// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each 2954/// target node in the graph. 2955void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) { 2956 if (ViewSchedDAGs) DAG.viewGraph(); 2957 ScheduleDAG *SL = NULL; 2958 2959 switch (ISHeuristic) { 2960 default: assert(0 && "Unrecognized scheduling heuristic"); 2961 case defaultScheduling: 2962 if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency) 2963 SL = createSimpleDAGScheduler(noScheduling, DAG, BB); 2964 else /* TargetLowering::SchedulingForRegPressure */ 2965 SL = createBURRListDAGScheduler(DAG, BB); 2966 break; 2967 case noScheduling: 2968 SL = createBFS_DAGScheduler(DAG, BB); 2969 break; 2970 case simpleScheduling: 2971 SL = createSimpleDAGScheduler(false, DAG, BB); 2972 break; 2973 case simpleNoItinScheduling: 2974 SL = createSimpleDAGScheduler(true, DAG, BB); 2975 break; 2976 case listSchedulingBURR: 2977 SL = createBURRListDAGScheduler(DAG, BB); 2978 break; 2979 case listSchedulingTD: 2980 SL = createTDListDAGScheduler(DAG, BB, CreateTargetHazardRecognizer()); 2981 break; 2982 } 2983 BB = SL->Run(); 2984 delete SL; 2985} 2986 2987HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() { 2988 return new HazardRecognizer(); 2989} 2990 2991/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated 2992/// by tblgen. Others should not call it. 2993void SelectionDAGISel:: 2994SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) { 2995 std::vector<SDOperand> InOps; 2996 std::swap(InOps, Ops); 2997 2998 Ops.push_back(InOps[0]); // input chain. 2999 Ops.push_back(InOps[1]); // input asm string. 3000 3001 const char *AsmStr = cast<ExternalSymbolSDNode>(InOps[1])->getSymbol(); 3002 unsigned i = 2, e = InOps.size(); 3003 if (InOps[e-1].getValueType() == MVT::Flag) 3004 --e; // Don't process a flag operand if it is here. 3005 3006 while (i != e) { 3007 unsigned Flags = cast<ConstantSDNode>(InOps[i])->getValue(); 3008 if ((Flags & 7) != 4 /*MEM*/) { 3009 // Just skip over this operand, copying the operands verbatim. 3010 Ops.insert(Ops.end(), InOps.begin()+i, InOps.begin()+i+(Flags >> 3) + 1); 3011 i += (Flags >> 3) + 1; 3012 } else { 3013 assert((Flags >> 3) == 1 && "Memory operand with multiple values?"); 3014 // Otherwise, this is a memory operand. Ask the target to select it. 3015 std::vector<SDOperand> SelOps; 3016 if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) { 3017 std::cerr << "Could not match memory address. Inline asm failure!\n"; 3018 exit(1); 3019 } 3020 3021 // Add this to the output node. 3022 Ops.push_back(DAG.getConstant(4/*MEM*/ | (SelOps.size() << 3), MVT::i32)); 3023 Ops.insert(Ops.end(), SelOps.begin(), SelOps.end()); 3024 i += 2; 3025 } 3026 } 3027 3028 // Add the flag input back if present. 3029 if (e != InOps.size()) 3030 Ops.push_back(InOps.back()); 3031} 3032
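// Illustrative summary of the inline asm operand encoding used above: each
// constant flag word built by RegsForValue::AddInlineAsmOperands and decoded
// in SelectInlineAsmMemoryOperands packs the operand kind into the low three
// bits and the number of following values into the remaining bits, i.e.
// Flags = Kind | (NumVals << 3).  A single-register definition is therefore
// 2 /*REGDEF*/ | (1 << 3) == 10, and a one-value memory operand is
// 4 /*MEM*/ | (1 << 3) == 12.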