ARMFastISel.cpp revision a4633f5d7458f4d04e4bf89be48d3b14e1fae044
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

namespace {

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
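    // These emitters mirror the FastISel base versions; the duplication is so
    // that every instruction built here flows through AddOptionalDefs and
    // picks up the default ARM predicate and optional-CC operands.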
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);

    #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectSIToFP(const Instruction *I);
    bool SelectFPToSI(const Instruction *I);
    bool SelectSDiv(const Instruction *I);
    bool SelectSRem(const Instruction *I);
    bool SelectCall(const Instruction *I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(const Type *Ty, EVT &VT);
    bool isLoadTypeLegal(const Type *Ty, EVT &VT);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, unsigned Base, int Offset);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, unsigned Base, int Offset);
    bool ARMComputeRegOffset(const Value *Obj, unsigned &Base, int &Offset);
    void ARMSimplifyRegOffset(unsigned &Base, int &Offset, EVT VT);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);

    // Call handling routines.
  private:
    bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                        unsigned &ResultReg);
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<EVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(EVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate?
  if (TII.isPredicable(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
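  // (AddDefaultT1CC below adds a CPSR def operand; AddDefaultCC adds the
  // empty reg0 optional cc_out operand.)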
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT.getSimpleVT().SimpleTy == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT.getSimpleVT().SimpleTy == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    unsigned Opc = is64bit ? ARM::FCONSTD : ARM::FCONSTS;
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addFPImm(CFP));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  // For now 32-bit only.
  if (VT.getSimpleVT().SimpleTy != MVT::i32) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra reg and immediate are for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addReg(0).addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT.getSimpleVT().SimpleTy != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: No external globals for now.
  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) return 0;

  // TODO: Need more magic for ARM PIC.
  if (!isThumb && (RelocM == Reloc::PIC_)) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(GV->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(GV->getType());
  }

  // Grab index.
  unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
  unsigned Id = AFI->createConstPoolEntryUId();
  ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, Id,
                                                       ARMCP::CPValue, PCAdj);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  // Load value.
  MachineInstrBuilder MIB;
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb) {
    unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx);
    if (RelocM == Reloc::PIC_)
      MIB.addImm(Id);
  } else {
    // The extra reg and immediate are for addrmode2.
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                  DestReg)
          .addConstantPoolIndex(Idx)
          .addReg(0).addImm(0);
  }
  AddOptionalDefs(MIB);
  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  EVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(const Type *Ty, EVT &VT) {
  VT = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (VT == MVT::Other || !VT.isSimple()) return false;

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(const Type *Ty, EVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the Reg+Offset to get to an object.
bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Base,
                                      int &Offset) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks; it's possible we haven't
    // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
      return false;
    Opcode = I->getOpcode();
    U = I;
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast: {
    // Look through bitcasts.
    return ARMComputeRegOffset(U->getOperand(0), Base, Offset);
  }
  case Instruction::IntToPtr: {
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return ARMComputeRegOffset(U->getOperand(0), Base, Offset);
    break;
  }
  case Instruction::PtrToInt: {
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return ARMComputeRegOffset(U->getOperand(0), Base, Offset);
    break;
  }
  case Instruction::GetElementPtr: {
    int SavedOffset = Offset;
    unsigned SavedBase = Base;
    int TmpOffset = Offset;

    // Iterate through the GEP folding the constants into offsets where
    // we can.
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      const Value *Op = *i;
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
        SmallVector<const Value *, 4> Worklist;
        Worklist.push_back(Op);
        do {
          Op = Worklist.pop_back_val();
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
          } else if (isa<AddOperator>(Op) &&
                     isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
            // An add with a constant operand. Fold the constant.
            ConstantInt *CI =
              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Add the other operand back to the work list.
            Worklist.push_back(cast<AddOperator>(Op)->getOperand(0));
          } else
            goto unsupported_gep;
        } while (!Worklist.empty());
      }
    }

    // Try to grab the base operand now.
    Offset = TmpOffset;
    if (ARMComputeRegOffset(U->getOperand(0), Base, Offset)) return true;

    // We failed, restore everything and try the other options.
    Offset = SavedOffset;
    Base = SavedBase;

  unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    unsigned Reg = TargetMaterializeAlloca(AI);

    if (Reg == 0) return false;

    Base = Reg;
    return true;
  }
  }

  // Materialize the global variable's address into a reg which can
  // then be used later to load the variable.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
    if (Tmp == 0) return false;

    Base = Tmp;
    return true;
  }

  // Try to get this in a register if nothing else has worked.
  if (Base == 0) Base = getRegForValue(Obj);
  return Base != 0;
}

void ARMFastISel::ARMSimplifyRegOffset(unsigned &Base, int &Offset, EVT VT) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    assert(false && "Unhandled load/store type!");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // Integer loads/stores handle 12-bit offsets.
    needsLowering = ((Offset & 0xfff) != Offset);
    break;
  case MVT::f32:
  case MVT::f64:
    // Floating point operands handle 8-bit offsets.
    needsLowering = ((Offset & 0xff) != Offset);
    break;
  }

  // If the offset is too large for the load/store instruction, get the
  // reg+offset into a register.
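  // For example, an i32 access at base+4096 doesn't fit the unsigned 12-bit
  // immediate, so the add is folded into a fresh base register below and the
  // offset reset to 0.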
  if (needsLowering) {
    ARMCC::CondCodes Pred = ARMCC::AL;
    unsigned PredReg = 0;

    TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
                                        ARM::GPRRegisterClass;
    unsigned BaseReg = createResultReg(RC);

    if (!isThumb)
      emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              BaseReg, Base, Offset, Pred, PredReg,
                              static_cast<const ARMBaseInstrInfo&>(TII));
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                             BaseReg, Base, Offset, Pred, PredReg,
                             static_cast<const ARMBaseInstrInfo&>(TII));
    }
    Offset = 0;
    Base = BaseReg;
  }
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg,
                              unsigned Base, int Offset) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  TargetRegisterClass *RC;
  bool isFloat = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // This is mostly going to be Neon/vector support.
    return false;
  case MVT::i16:
    Opc = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i8:
    Opc = isThumb ? ARM::t2LDRBi12 : ARM::LDRB;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i32:
    Opc = isThumb ? ARM::t2LDRi12 : ARM::LDR;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::f32:
    Opc = ARM::VLDRS;
    RC = TLI.getRegClassFor(VT);
    isFloat = true;
    break;
  case MVT::f64:
    Opc = ARM::VLDRD;
    RC = TLI.getRegClassFor(VT);
    isFloat = true;
    break;
  }

  ResultReg = createResultReg(RC);

  ARMSimplifyRegOffset(Base, Offset, VT);

  // The addrmode5 encoding expects the offset already divided by 4 (the
  // selection dag addressing divides it and multiplies it back later), so
  // do the same division here.
  if (isFloat)
    Offset /= 4;

  // The thumb and floating point instructions both take 2 operands; ARM takes
  // another register.
  if (isFloat || isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addReg(Base).addImm(Offset));
  else
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addReg(Base).addReg(0).addImm(Offset));
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Verify we have a legal type before going any further.
  EVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // Our register and offset with innocuous defaults.
  unsigned Base = 0;
  int Offset = 0;

  // See if we can handle this as Reg + Offset
  if (!ARMComputeRegOffset(I->getOperand(0), Base, Offset))
    return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Base, Offset)) return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg,
                               unsigned Base, int Offset) {
  unsigned StrOpc;
  bool isFloat = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
    StrOpc = isThumb ? ARM::t2STRBi12 : ARM::STRB;
    break;
  case MVT::i16:
    StrOpc = isThumb ? ARM::t2STRHi12 : ARM::STRH;
    break;
  case MVT::i32:
    StrOpc = isThumb ? ARM::t2STRi12 : ARM::STR;
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    StrOpc = ARM::VSTRS;
    isFloat = true;
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    StrOpc = ARM::VSTRD;
    isFloat = true;
    break;
  }

  ARMSimplifyRegOffset(Base, Offset, VT);

  // The addrmode5 encoding expects the offset already divided by 4 (the
  // selection dag addressing divides it and multiplies it back later), so
  // do the same division here.
  if (isFloat)
    Offset /= 4;

  // The thumb addressing mode has operands swapped from the arm addressing
  // mode; the floating point one only has two operands.
  if (isFloat || isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(StrOpc))
                    .addReg(SrcReg).addReg(Base).addImm(Offset));
  else
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(StrOpc))
                    .addReg(SrcReg).addReg(Base).addReg(0).addImm(Offset));

  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Yay type legalization
  EVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0)
    return false;

  // Our register and offset with innocuous defaults.
  unsigned Base = 0;
  int Offset = 0;

  // See if we can handle this as Reg + Offset
  if (!ARMComputeRegOffset(I->getOperand(1), Base, Offset))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Base, Offset)) return false;

  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
  // Needs two compares...
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UEQ:
  default:
    assert(false && "Unhandled CmpInst::Predicate!");
    return ARMCC::AL;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    return ARMCC::EQ;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    return ARMCC::GT;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    return ARMCC::GE;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    return ARMCC::HI;
  case CmpInst::FCMP_OLT:
    return ARMCC::MI;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    return ARMCC::LS;
  case CmpInst::FCMP_ORD:
    return ARMCC::VC;
  case CmpInst::FCMP_UNO:
    return ARMCC::VS;
  case CmpInst::FCMP_UGE:
    return ARMCC::PL;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    return ARMCC::LT;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    return ARMCC::LE;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    return ARMCC::NE;
  case CmpInst::ICMP_UGE:
    return ARMCC::HS;
  case CmpInst::ICMP_ULT:
    return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.
  // TODO: Try to avoid the re-computation in some places.
  unsigned CondReg = getRegForValue(BI->getCondition());
  if (CondReg == 0) return false;

  // Re-set the flags just in case.
  unsigned CmpOpc = isThumb ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(1));

  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  EVT VT;
  const Type *Ty = CI->getOperand(0)->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned CmpOpc;
  unsigned CondReg;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  // TODO: Verify compares.
  case MVT::f32:
    CmpOpc = ARM::VCMPES;
    CondReg = ARM::FPSCR;
    break;
  case MVT::f64:
    CmpOpc = ARM::VCMPED;
    CondReg = ARM::FPSCR;
    break;
  case MVT::i32:
    CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
    CondReg = ARM::CPSR;
    break;
  }

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  unsigned Arg1 = getRegForValue(CI->getOperand(0));
  if (Arg1 == 0) return false;

  unsigned Arg2 = getRegForValue(CI->getOperand(1));
  if (Arg2 == 0) return false;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(Arg1).addReg(Arg2));

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (isFloat)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass
                                    : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  EVT DstVT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  // The conversion routine works on fp-reg to fp-reg, and the operand above
  // was an integer; move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, Op);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  EVT DstVT;
  const Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  const Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;

  // f64->s32 or f32->s32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT.getSimpleVT().SimpleTy != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  unsigned CmpOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(1));
  unsigned ResultReg = createResultReg(RC);
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCr : ARM::MOVCCr;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op1Reg).addReg(Op2Reg)
    .addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectSDiv(const Instruction *I) {
  EVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  EVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  const Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned Opc;
  bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64 ||
                 VT.getSimpleVT().SimpleTy == MVT::i64;
  switch (ISDOpcode) {
  default: return false;
  case ISD::FADD:
    Opc = is64bit ? ARM::VADDD : ARM::VADDS;
    break;
  case ISD::FSUB:
    Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
    break;
  case ISD::FMUL:
    Opc = is64bit ? ARM::VMULD : ARM::VMULS;
    break;
  }
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
                                 EVT SrcVT, unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                           Src, /*TODO: Kill=*/false);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<EVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    EVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON parameters yet.
    if (VA.getLocVT().isVector() && VA.getLocVT().getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
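    // Depending on what the CC assigned, the value may need to be sign- or
    // zero-extended to the location type, or bit-converted (e.g. an f32
    // viewed as an i32) before it can live in its assigned location.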
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt: {
      bool Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::ZExt: {
      bool Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::AExt: {
      bool Emitted = FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                 Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                 Arg, ArgVT, Arg);

      assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::BCvt: {
      unsigned BC = FastEmit_r(ArgVT.getSimpleVT(),
                               VA.getLocVT().getSimpleVT(),
                               ISD::BIT_CONVERT, Arg, /*TODO: Kill=*/false);
      assert(BC != 0 && "Failed to emit a bitcast!");
      Arg = BC;
      ArgVT = VA.getLocVT();
      break;
    }
    default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      unsigned Base = ARM::SP;
      int Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Base, Offset)) return false;
    }
  }
  return true;
}

bool ARMFastISel::FinishCall(EVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT.getSimpleVT().SimpleTy != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT.getSimpleVT().SimpleTy == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
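      // (Under a soft-float return convention the f64 comes back as a pair
      // of GPRs, which the VMOVDRR below merges into one D register.)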
      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;
    // TODO: For now, don't try to handle cases where getLocInfo()
    // says Full but the types don't match.
    if (VA.getValVT() != TLI.getValueType(RV->getType()))
      return false;

    // Make the copy.
    unsigned SrcReg = Reg + VA.getValNo();
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

// A quick function that will emit a call for a named libcall in F with the
// vector of passed arguments for the Instruction in I. We can assume that we
// can emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  const Type *RetTy = I->getType();
  EVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // For now we're using BLX etc on the assumption that we have v5t ops.
  if (!Subtarget->hasV5TOps()) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<EVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    const Type *ArgTy = Op->getType();
    EVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLXr9 for darwin, BLX otherwise. This uses V5 ops.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc;
  if (isThumb)
    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
  else
    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
        .addExternalSymbol(TLI.getLibcallName(Call));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm or worry about intrinsics yet.
  if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;

  // Only handle global variable Callees that are direct calls.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel()))
    return false;

  // Check the calling convention.
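  // (An ImmutableCallSite gives a read-only view of the call's convention
  // and per-argument attributes, which are queried below.)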
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  const Type *RetTy = I->getType();
  EVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // For now we're using BLX etc on the assumption that we have v5t ops.
  // TODO: Maybe?
  if (!Subtarget->hasV5TOps()) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<EVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);

    if (Arg == 0)
      return false;
    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    const Type *ArgTy = (*i)->getType();
    EVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT))
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLXr9 for darwin, BLX otherwise. This uses V5 ops.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc;
  if (isThumb)
    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
  else
    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
        .addGlobalAddress(GV, 0, 0);

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  // No Thumb-1 for now.
  if (isThumb && !AFI->isThumb2Function()) return false;

  switch (I->getOpcode()) {
  case Instruction::Load:
    return SelectLoad(I);
  case Instruction::Store:
    return SelectStore(I);
  case Instruction::Br:
    return SelectBranch(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return SelectCmp(I);
  case Instruction::FPExt:
    return SelectFPExt(I);
  case Instruction::FPTrunc:
    return SelectFPTrunc(I);
  case Instruction::SIToFP:
    return SelectSIToFP(I);
  case Instruction::FPToSI:
    return SelectFPToSI(I);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::FSub:
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectSDiv(I);
  case Instruction::SRem:
    return SelectSRem(I);
  case Instruction::Call:
    return SelectCall(I);
  case Instruction::Select:
    return SelectSelect(I);
  case Instruction::Ret:
    return SelectRet(I);
  default: break;
  }
  return false;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-darwin.
    const TargetMachine &TM = funcInfo.MF->getTarget();
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetDarwin() && !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}