ARMFastISel.cpp revision 564857f776084a85b6b4bf0c896fd60c69d0c521
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;
    unsigned Scale;
    unsigned PlusReg;

    // Innocuous defaults for our address.
    Address()
      : BaseType(RegBase), Offset(0), Scale(0), PlusReg(0) {
      Base.Reg = 0;
    }
  } Address;
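
  // For example, an access through a pointer held in a virtual register is
  // described as { BaseType = RegBase, Base.Reg = <vreg>, Offset = <imm> },
  // while a static alloca becomes { BaseType = FrameIndexBase, Base.FI = <fi> }.
  // Scale and PlusReg are placeholders for reg+reg addressing; nothing in
  // this file appears to use them yet.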
class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectSIToFP(const Instruction *I);
    bool SelectFPToSI(const Instruction *I);
    bool SelectSDiv(const Instruction *I);
    bool SelectSRem(const Instruction *I);
    bool SelectCall(const Instruction *I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(const Type *Ty, MVT &VT);
    bool isLoadTypeLegal(const Type *Ty, MVT &VT);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);

    // Call handling routines.
  private:
    bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                        unsigned &ResultReg);
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

// If the machine instruction is predicable, add the predicate operands; if
// it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate?
  if (TII.isPredicable(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}
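
// Note on the FastEmitInst_* helpers below: the suffix encodes the operand
// kinds (r = register, i = integer immediate, f = FP immediate, rri =
// register, register, immediate, and so on). When the requested instruction
// has no explicit definition (getNumDefs() == 0), its result is produced in
// an implicit physical def, so the helpers fall back to copying
// II.ImplicitDefs[0] into the freshly created virtual ResultReg, presenting
// a uniform register-valued interface to callers.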
unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    unsigned Opc = is64bit ? ARM::FCONSTD : ARM::FCONSTS;
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addFPImm(CFP));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}
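
// For example: with VFP3, a representable immediate such as 1.0 becomes a
// single FCONSTS/FCONSTD above, while other FP constants are loaded from the
// literal pool with VLDRS/VLDRD. The integer path below is analogous: on
// v6T2+ a value like 42 fits a single MOVi16/t2MOVi16, whereas a full 32-bit
// pattern such as 0x12345678 goes through the constant pool.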
unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getSExtValue())) {
    unsigned Opc = isThumb ? ARM::t2MOVi16 : ARM::MOVi16;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), DestReg)
                    .addImm(CI->getSExtValue()));
    return DestReg;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: No external globals for now.
  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) return 0;

  // TODO: Need more magic for ARM PIC.
  if (!isThumb && (RelocM == Reloc::PIC_)) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(GV->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(GV->getType());
  }

  // Grab index.
  unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
  unsigned Id = AFI->createConstPoolEntryUId();
  ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, Id,
                                                       ARMCP::CPValue, PCAdj);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  // Load value.
  MachineInstrBuilder MIB;
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb) {
    unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx);
    if (RelocM == Reloc::PIC_)
      MIB.addImm(Id);
  } else {
    // The extra immediate is for addrmode2.
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                  DestReg)
          .addConstantPoolIndex(Idx)
          .addImm(0);
  }
  AddOptionalDefs(MIB);
  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}
unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(const Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(const Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    break;
    case Instruction::BitCast: {
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    }
    case Instruction::IntToPtr: {
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::PtrToInt: {
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          SmallVector<const Value *, 4> Worklist;
          Worklist.push_back(Op);
          do {
            Op = Worklist.pop_back_val();
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
            } else if (isa<AddOperator>(Op) &&
                       isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add with a constant operand. Fold the constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Add the other operand back to the work list.
              Worklist.push_back(cast<AddOperator>(Op)->getOperand(0));
            } else
              goto unsupported_gep;
          } while (!Worklist.empty());
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Materialize the global variable's address into a reg which can
  // then be used later to load the variable.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
    if (Tmp == 0) return false;

    Addr.Base.Reg = Tmp;
    return true;
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}
void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
    default:
      assert(false && "Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      // Integer loads/stores handle 12-bit offsets.
      needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
                                        ARM::GPRRegisterClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    ARMCC::CondCodes Pred = ARMCC::AL;
    unsigned PredReg = 0;

    TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
                                        ARM::GPRRegisterClass;
    unsigned BaseReg = createResultReg(RC);

    if (!isThumb)
      emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              BaseReg, Addr.Base.Reg, Addr.Offset,
                              Pred, PredReg,
                              static_cast<const ARMBaseInstrInfo&>(TII));
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                             BaseReg, Addr.Base.Reg, Addr.Offset, Pred, PredReg,
                             static_cast<const ARMBaseInstrInfo&>(TII));
    }
    Addr.Offset = 0;
    Addr.Base.Reg = BaseReg;
  }
}

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    // Tag the memory operand as a load or a store to match the instruction
    // being built.
    unsigned Flags = MIB->getDesc().mayStore() ? MachineMemOperand::MOStore
                                               : MachineMemOperand::MOLoad;
    MachineMemOperand *MMO =
          FuncInfo.MF->getMachineMemOperand(
                                  MachinePointerInfo::getFixedStack(FI, Offset),
                                  Flags,
                                  MFI.getObjectSize(FI),
                                  MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i16:
      Opc = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i8:
      Opc = isThumb ? ARM::t2LDRBi12 : ARM::LDRBi12;
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i32:
      Opc = isThumb ? ARM::t2LDRi12 : ARM::LDRi12;
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::f32:
      Opc = ARM::VLDRS;
      RC = TLI.getRegClassFor(VT);
      break;
    case MVT::f64:
      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB);
  return true;
}
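
// As a concrete sketch: for IR like
//   %v = load i32* %p
// SelectLoad below computes %p into an Address, and ARMEmitLoad above picks
// t2LDRi12 (Thumb2) or LDRi12 (ARM) with a 12-bit immediate offset; f32/f64
// loads use VLDRS/VLDRD, whose addrmode5 offset was already divided by 4 in
// AddLoadStoreOperands.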
bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr)) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
  unsigned StrOpc;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb ? ARM::tGPRRegisterClass :
                                               ARM::GPRRegisterClass);
      unsigned Opc = isThumb ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      StrOpc = isThumb ? ARM::t2STRBi12 : ARM::STRBi12;
      break;
    case MVT::i16:
      StrOpc = isThumb ? ARM::t2STRHi12 : ARM::STRH;
      break;
    case MVT::i32:
      StrOpc = isThumb ? ARM::t2STRi12 : ARM::STRi12;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRS;
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg, getKillRegState(true));
  AddLoadStoreOperands(VT, Addr, MIB);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr)) return false;
  return true;
}
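
// Example of the predicate mapping below: FCMP_OGT lowers to a VCMPE +
// FMSTAT sequence followed by a GT condition, while ICMP_ULT uses the
// unsigned LO condition. ARMCC::AL doubles as a "not handled" sentinel for
// the predicates that would need a second compare.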
static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  // TODO: Factor this out.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
      MVT VT;
      const Type *Ty = CI->getOperand(0)->getType();
      if (!isTypeLegal(Ty, VT))
        return false;

      bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
      if (isFloat && !Subtarget->hasVFP2())
        return false;

      unsigned CmpOpc;
      unsigned CondReg;
      switch (VT.SimpleTy) {
        default: return false;
        // TODO: Verify compares.
        case MVT::f32:
          CmpOpc = ARM::VCMPES;
          CondReg = ARM::FPSCR;
          break;
        case MVT::f64:
          CmpOpc = ARM::VCMPED;
          CondReg = ARM::FPSCR;
          break;
        case MVT::i32:
          CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
          CondReg = ARM::CPSR;
          break;
      }

      // Get the compare predicate.
      ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      unsigned Arg1 = getRegForValue(CI->getOperand(0));
      if (Arg1 == 0) return false;

      unsigned Arg2 = getRegForValue(CI->getOperand(1));
      if (Arg2 == 0) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(CmpOpc))
                      .addReg(Arg1).addReg(Arg2));

      // For floating point we need to move the result to a comparison register
      // that we can then use for branches.
      if (isFloat)
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::FMSTAT)));

      unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // Re-set the flags just in case.
  unsigned CmpOpc = isThumb ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CmpReg).addImm(0));

  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}
bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  MVT VT;
  const Type *Ty = CI->getOperand(0)->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned CmpOpc;
  unsigned CondReg;
  switch (VT.SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      CmpOpc = ARM::VCMPES;
      CondReg = ARM::FPSCR;
      break;
    case MVT::f64:
      CmpOpc = ARM::VCMPED;
      CondReg = ARM::FPSCR;
      break;
    case MVT::i32:
      CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
      CondReg = ARM::CPSR;
      break;
  }

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  unsigned Arg1 = getRegForValue(CI->getOperand(0));
  if (Arg1 == 0) return false;

  unsigned Arg2 = getRegForValue(CI->getOperand(1));
  if (Arg2 == 0) return false;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(Arg1).addReg(Arg2));

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (isFloat)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass
                                    : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero
    = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}
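
// For example,
//   %d = fpext float %f to double
// is handled above with a single VCVTDS from an S register into a D
// register; SelectFPTrunc below is the mirror image using VCVTSD.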
bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, Op);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  const Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  const Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;

  // f64->s32 or f32->s32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}
bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  unsigned CmpOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(1));
  unsigned ResultReg = createResultReg(RC);
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCr : ARM::MOVCCr;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op1Reg).addReg(Op2Reg)
    .addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectSDiv(const Instruction *I) {
  MVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  MVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}
bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to do so. Just use the VFP
  // instructions if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  const Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
                                 EVT SrcVT, unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                           Src, /*TODO: Kill=*/false);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}
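
// Example of the dispatch above, following the AAPCS: under AAPCS-VFP
// ("hard float", CC_ARM_AAPCS_VFP) a float argument is assigned to an S
// register (s0-s15), while plain AAPCS (CC_ARM_AAPCS) passes the same bits
// in core registers r0-r3 or on the stack.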
bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON parameters yet.
    if (VA.getLocVT().isVector() && VA.getLocVT().getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        bool Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                      Arg, ArgVT, Arg);
        assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
        ArgVT = VA.getLocVT();
        break;
      }
      case CCValAssign::ZExt: {
        bool Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                      Arg, ArgVT, Arg);
        assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
        ArgVT = VA.getLocVT();
        break;
      }
      case CCValAssign::AExt: {
        bool Emitted = FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
                                      Arg, ArgVT, Arg);
        if (!Emitted)
          Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                   Arg, ArgVT, Arg);
        if (!Emitted)
          Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                   Arg, ArgVT, Arg);

        assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
        ArgVT = VA.getLocVT();
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
    }
  }
  return true;
}
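
// For instance, an f64 returned under a soft-float calling convention comes
// back split across two core registers (r0/r1); FinishCall below stitches
// the two halves together with VMOVDRR into the double register the caller
// expects.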
bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;
    // TODO: For now, don't try to handle cases where getLocInfo()
    // says Full but the types don't match.
    if (TLI.getValueType(RV->getType()) != VA.getValVT())
      return false;

    // Make the copy.
    unsigned SrcReg = Reg + VA.getValNo();
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}
// A quick function that will emit a call for a named libcall with the vector
// of passed arguments for the Instruction in I. We can assume that we can
// emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  const Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // For now we're using BLX etc on the assumption that we have v5t ops.
  if (!Subtarget->hasV5TOps()) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    const Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call: BLr9/tBLXi_r9 on Darwin, BL/tBLXi otherwise. This uses
  // V5 ops.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc;
  if (isThumb)
    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
  else
    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
  // Explicitly adding the predicate here.
  MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                               TII.get(CallOpc)))
        .addExternalSymbol(TLI.getLibcallName(Call));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
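
// Usage sketch: SelectSDiv above funnels an i32 sdiv on cores without a
// hardware divider into ARMEmitLibcall(I, RTLIB::SDIV_I32), which then calls
// whatever routine TLI.getLibcallName() reports for that libcall -- typically
// __aeabi_idiv on AAPCS/EABI targets or __divsi3 elsewhere.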
bool ARMFastISel::SelectCall(const Instruction *I) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm or worry about intrinsics yet.
  if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;

  // Only handle global Callees that are direct calls.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel()))
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  const Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // For now we're using BLX etc on the assumption that we have v5t ops.
  // TODO: Maybe?
  if (!Subtarget->hasV5TOps()) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    const Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT))
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call: tBLXi (tBLXi_r9 on Darwin) in Thumb mode, BL (BLr9 on
  // Darwin) in ARM mode. Either way this relies on v5t ops.
  // TODO: Turn this into a table of ARM call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc;
  if (isThumb)
    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
  else
    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
  // Explicitly add the predicate here.
  MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                               TII.get(CallOpc)))
        .addGlobalAddress(GV, 0, 0);

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call, including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
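// By way of example (again a sketch, not verbatim output): a direct call
//   %r = call i32 @foo(i32 %x)
// passes the GlobalValue/indirect-symbol checks above, has %x marshalled
// into R0 by ProcessCallArgs, and on Darwin/ARM is emitted roughly as
//   BLr9 <ga:@foo>, uses R0, defines R0
// with FinishCall copying R0 into a fresh vreg for %r.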
// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectSIToFP(I);
    case Instruction::FPToSI:
      return SelectFPToSI(I);
    case Instruction::FAdd:
      return SelectBinaryOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectSDiv(I);
    case Instruction::SRem:
      return SelectSRem(I);
    case Instruction::Call:
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    default: break;
  }
  return false;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-darwin.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // Darwin only for now, and not Thumb1.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}
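// Usage sketch (hook names per the FastISel interface of this era; the
// exact call site lives in SelectionDAGISel): the target-independent code
// obtains a target FastISel with something like
//   FastISel *FastIS = TLI.createFastISel(FuncInfo);
// where the ARMTargetLowering override is expected to forward to
// ARM::createFastISel above. A null return (non-Darwin, Thumb1-only, or
// -disable-arm-fast-isel) makes instruction selection fall back to the
// SelectionDAG path.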