ARMFastISel.cpp revision 836c6245ad7e8f2b9f72c2a9e4cb1df101eaf2c7
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;
    unsigned Scale;
    unsigned PlusReg;

    // Innocuous defaults for our address.
    Address()
      : BaseType(RegBase), Offset(0), Scale(0), PlusReg(0) {
      Base.Reg = 0;
    }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
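
    // Naming note (illustrative, not from the original source): the suffix
    // on these emitters encodes the operand kinds -- "_r" one register,
    // "_rr" two registers, "_ri" register plus immediate, and so on. A
    // hypothetical use would be
    //   unsigned R = FastEmitInst_ri(ARM::t2ADDri, RC, Base, false, /*Imm=*/4);
    // which emits a single add-immediate into a fresh virtual register.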
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectSIToFP(const Instruction *I);
    bool SelectFPToSI(const Instruction *I);
    bool SelectSDiv(const Instruction *I);
    bool SelectSRem(const Instruction *I);
    bool SelectCall(const Instruction *I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(const Type *Ty, MVT &VT);
    bool isLoadTypeLegal(const Type *Ty, MVT &VT);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);

    // Call handling routines.
  private:
    bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                        unsigned &ResultReg);
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"
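
// Background (sketch, not in the original source): most ARM instructions
// carry trailing predicate operands, and some also carry an optional
// CPSR/CCR def (the 's' bit). In MI form a plain "add r0, r1, r2" looks
// roughly like
//   ADDrr %r0, %r1, %r2, pred:AL, predreg:noreg, opt-cc-out:noreg
// The two helpers below supply those conservative defaults (AL predicate,
// no flag write) for every instruction FastISel builds by hand.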
// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

// If the machine instruction is predicable, add the predicate operands; if
// it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for a description of why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate?
  if (TII.isPredicable(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  CPSR is true iff the optional def
  // is the CPSR register; all other OptionalDefs in ARM are the CCR
  // register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
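
// Note on the pattern above and in the emitters below (explanatory, not in
// the original source): when an opcode declares no explicit defs, its
// result lives only in ImplicitDefs[0] (a physical register), so we emit
// the instruction and then COPY that implicit def into the virtual
// ResultReg, ensuring callers always receive a vreg.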
unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
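
// Illustrative note (not in the original source): extracting a subregister
// needs no target-specific opcode; a plain COPY carrying a subreg index is
// enough, e.g. copying the low 32-bit half of a 64-bit register pair into
// a fresh 32-bit vreg.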
unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    unsigned Opc = is64bit ? ARM::FCONSTD : ARM::FCONSTS;
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addFPImm(CFP));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getSExtValue())) {
    unsigned Opc = isThumb ? ARM::t2MOVi16 : ARM::MOVi16;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), DestReg)
                    .addImm(CI->getSExtValue()));
    return DestReg;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}
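
// Example (illustrative): on v6t2+ targets, materializing 42 becomes a
// single MOVW (t2MOVi16/MOVi16) because it fits in 16 bits, while a value
// such as 0x12345678 falls through to the constant-pool load path above.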
unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: No external globals for now.
  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) return 0;

  // TODO: Need more magic for ARM PIC.
  if (!isThumb && (RelocM == Reloc::PIC_)) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(GV->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(GV->getType());
  }

  // Grab index.
  unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
  unsigned Id = AFI->createConstPoolEntryUId();
  ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, Id,
                                                       ARMCP::CPValue, PCAdj);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  // Load value.
  MachineInstrBuilder MIB;
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb) {
    unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx);
    if (RelocM == Reloc::PIC_)
      MIB.addImm(Id);
  } else {
    // The extra immediate is for addrmode2.
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                  DestReg)
          .addConstantPoolIndex(Idx)
          .addImm(0);
  }
  AddOptionalDefs(MIB);
  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}
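
// Sketch (not in the original source): a static alloca is lowered below as
// an add off a frame index, e.g. "%buf = alloca i32" becomes roughly
//   %vreg = t2ADDri <fi#N>, 0
// and the frame index is rewritten to an SP-relative form after frame
// layout has run.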
unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(const Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(const Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast: {
    // Look through bitcasts.
    return ARMComputeAddress(U->getOperand(0), Addr);
  }
  case Instruction::IntToPtr: {
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::PtrToInt: {
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::GetElementPtr: {
    Address SavedAddr = Addr;
    int TmpOffset = Addr.Offset;

    // Iterate through the GEP folding the constants into offsets where
    // we can.
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      const Value *Op = *i;
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
        SmallVector<const Value *, 4> Worklist;
        Worklist.push_back(Op);
        do {
          Op = Worklist.pop_back_val();
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
          } else if (isa<AddOperator>(Op) &&
                     isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
            // An add with a constant operand. Fold the constant.
            ConstantInt *CI =
              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Add the other operand back to the work list.
            Worklist.push_back(cast<AddOperator>(Op)->getOperand(0));
          } else
            goto unsupported_gep;
        } while (!Worklist.empty());
      }
    }

    // Try to grab the base operand now.
    Addr.Offset = TmpOffset;
    if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

    // We failed, restore everything and try the other options.
    Addr = SavedAddr;

    unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.BaseType = Address::FrameIndexBase;
      Addr.Base.FI = SI->second;
      return true;
    }
    break;
  }
  }

  // Materialize the global variable's address into a reg which can
  // then be used later to load the variable.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
    if (Tmp == 0) return false;

    Addr.Base.Reg = Tmp;
    return true;
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}
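
// Worked example (illustrative): for IR like
//   %p = getelementptr [10 x i32]* %a, i32 0, i32 3
// the loop above folds the constant indices into Addr.Offset = 12 and then
// resolves %a itself as the base, so no explicit address arithmetic is
// emitted for the GEP.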
void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    assert(false && "Unhandled load/store type!");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // Integer loads/stores handle 12-bit offsets.
    needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
    break;
  case MVT::f32:
  case MVT::f64:
    // Floating point operands handle 8-bit offsets.
    needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
    break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
                                        ARM::GPRRegisterClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    ARMCC::CondCodes Pred = ARMCC::AL;
    unsigned PredReg = 0;

    TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
                                        ARM::GPRRegisterClass;
    unsigned BaseReg = createResultReg(RC);

    if (!isThumb)
      emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              BaseReg, Addr.Base.Reg, Addr.Offset,
                              Pred, PredReg,
                              static_cast<const ARMBaseInstrInfo&>(TII));
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                             BaseReg, Addr.Base.Reg, Addr.Offset, Pred, PredReg,
                             static_cast<const ARMBaseInstrInfo&>(TII));
    }
    Addr.Offset = 0;
    Addr.Base.Reg = BaseReg;
  }
}
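
// Range note (illustrative): an i32 load at offset 4095 still encodes
// directly in the 12-bit immediate, while offset 4096 takes the lowering
// path above that materializes base+offset into a register first. VFP
// loads are stricter: an 8-bit immediate that is later scaled by 4.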
void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB) {
  // addrmode5 expects the offset already divided by 4: the selection DAG
  // addressing does this division and the encoding multiplies it back.
  // Do the same division here.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    // FIXME: The memory operand is flagged MOLoad even when this helper is
    // used for stores.
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        MachineMemOperand::MOLoad,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i16:
    Opc = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i8:
    Opc = isThumb ? ARM::t2LDRBi12 : ARM::LDRBi12;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i32:
    Opc = isThumb ? ARM::t2LDRi12 : ARM::LDRi12;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::f32:
    Opc = ARM::VLDRS;
    RC = TLI.getRegClassFor(VT);
    break;
  case MVT::f64:
    Opc = ARM::VLDRD;
    RC = TLI.getRegClassFor(VT);
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB);
  return true;
}
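
// Note (explanatory, not in the original source): the i8/i16 cases above
// use the zero-extending load forms (LDRB/LDRH and their Thumb2
// equivalents), so the result register holds a zero-extended value.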
bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr)) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
  unsigned StrOpc;
  switch (VT.getSimpleVT().SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i1: {
    unsigned Res = createResultReg(isThumb ? ARM::tGPRRegisterClass :
                                             ARM::GPRRegisterClass);
    unsigned Opc = isThumb ? ARM::t2ANDri : ARM::ANDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), Res)
                    .addReg(SrcReg).addImm(1));
    SrcReg = Res;
  } // Fallthrough here.
  case MVT::i8:
    StrOpc = isThumb ? ARM::t2STRBi12 : ARM::STRBi12;
    break;
  case MVT::i16:
    StrOpc = isThumb ? ARM::t2STRHi12 : ARM::STRH;
    break;
  case MVT::i32:
    StrOpc = isThumb ? ARM::t2STRi12 : ARM::STRi12;
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    StrOpc = ARM::VSTRS;
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    StrOpc = ARM::VSTRD;
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg, getKillRegState(true));
  AddLoadStoreOperands(VT, Addr, MIB);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr)) return false;
  return true;
}
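
// Mapping note (illustrative): ARMCC::AL is used below as a "can't handle"
// sentinel because FCMP_ONE and FCMP_UEQ each need two compares -- e.g.
// "one" is really "ogt || olt" -- which this fast path does not emit.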
static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
  // Needs two compares...
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UEQ:
  default:
    // AL is our "false" for now. The other two need more compares.
    return ARMCC::AL;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    return ARMCC::EQ;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    return ARMCC::GT;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    return ARMCC::GE;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    return ARMCC::HI;
  case CmpInst::FCMP_OLT:
    return ARMCC::MI;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    return ARMCC::LS;
  case CmpInst::FCMP_ORD:
    return ARMCC::VC;
  case CmpInst::FCMP_UNO:
    return ARMCC::VS;
  case CmpInst::FCMP_UGE:
    return ARMCC::PL;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    return ARMCC::LT;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    return ARMCC::LE;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    return ARMCC::NE;
  case CmpInst::ICMP_UGE:
    return ARMCC::HS;
  case CmpInst::ICMP_ULT:
    return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  // TODO: Factor this out.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
      MVT VT;
      const Type *Ty = CI->getOperand(0)->getType();
      if (!isTypeLegal(Ty, VT))
        return false;

      bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
      if (isFloat && !Subtarget->hasVFP2())
        return false;

      unsigned CmpOpc;
      unsigned CondReg;
      switch (VT.SimpleTy) {
      default: return false;
      // TODO: Verify compares.
      case MVT::f32:
        CmpOpc = ARM::VCMPES;
        CondReg = ARM::FPSCR;
        break;
      case MVT::f64:
        CmpOpc = ARM::VCMPED;
        CondReg = ARM::FPSCR;
        break;
      case MVT::i32:
        CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
        CondReg = ARM::CPSR;
        break;
      }

      // Get the compare predicate.
      ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      unsigned Arg1 = getRegForValue(CI->getOperand(0));
      if (Arg1 == 0) return false;

      unsigned Arg2 = getRegForValue(CI->getOperand(1));
      if (Arg2 == 0) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(CmpOpc))
                      .addReg(Arg1).addReg(Arg2));

      // For floating point we need to move the result to a comparison register
      // that we can then use for branches.
      if (isFloat)
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::FMSTAT)));

      unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // Re-set the flags just in case.
  unsigned CmpOpc = isThumb ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CmpReg).addImm(0));

  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}
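
// Example (illustrative): for "%c = icmp eq i32 %a, %b; br i1 %c, ..." in a
// single block, the fast path above fuses the compare into CMPrr + Bcc(EQ);
// otherwise the generic path re-tests the materialized i1 with CMPri #0
// and branches on NE.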
bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  MVT VT;
  const Type *Ty = CI->getOperand(0)->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned CmpOpc;
  unsigned CondReg;
  switch (VT.SimpleTy) {
  default: return false;
  // TODO: Verify compares.
  case MVT::f32:
    CmpOpc = ARM::VCMPES;
    CondReg = ARM::FPSCR;
    break;
  case MVT::f64:
    CmpOpc = ARM::VCMPED;
    CondReg = ARM::FPSCR;
    break;
  case MVT::i32:
    CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
    CondReg = ARM::CPSR;
    break;
  }

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  unsigned Arg1 = getRegForValue(CI->getOperand(0));
  if (Arg1 == 0) return false;

  unsigned Arg2 = getRegForValue(CI->getOperand(1));
  if (Arg2 == 0) return false;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(Arg1).addReg(Arg2));

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (isFloat)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass
                                    : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero
    = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}
bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, Op);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  const Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  const Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;

  // f64->s32 or f32->s32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  unsigned CmpOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(1));
  unsigned ResultReg = createResultReg(RC);
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCr : ARM::MOVCCr;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op1Reg).addReg(Op2Reg)
    .addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::SelectSDiv(const Instruction *I) {
  MVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  MVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  const Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
  default: return false;
  case ISD::FADD:
    Opc = is64bit ? ARM::VADDD : ARM::VADDS;
    break;
  case ISD::FSUB:
    Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
    break;
  case ISD::FMUL:
    Opc = is64bit ? ARM::VMULD : ARM::VMULS;
    break;
  }
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}
// Call Handling Code

bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
                                 EVT SrcVT, unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                           Src, /*TODO: Kill=*/false);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON parameters yet.
    if (VA.getLocVT().isVector() && VA.getLocVT().getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt: {
      bool Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::ZExt: {
      bool Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::AExt: {
      bool Emitted = FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                 Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                 Arg, ArgVT, Arg);

      assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::BCvt: {
      unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                               /*TODO: Kill=*/false);
      assert(BC != 0 && "Failed to emit a bitcast!");
      Arg = BC;
      ArgVT = VA.getLocVT();
      break;
    }
    default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
    }
  }
  return true;
}
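
// Example (illustrative): under AAPCS, six i32 arguments are assigned
// r0-r3 by AnalyzeCallOperands and the remaining two go to the stack, so
// NumBytes comes back as 8 and two SP-relative stores are emitted above.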
bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}
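
// Note (sketch): with soft-float returns an f64 comes back split across
// two core registers (r0/r1 under AAPCS), which is why FinishCall above
// glues the two halves together with VMOVDRR before updating the value map.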
bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;
    // TODO: For now, don't try to handle cases where getLocInfo()
    // says Full but the types don't match.
    if (TLI.getValueType(RV->getType()) != VA.getValVT())
      return false;

    // Make the copy.
    unsigned SrcReg = Reg + VA.getValNo();
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}
// A quick function that will emit a call for a named libcall in F with the
// vector of passed arguments for the Instruction in I. We can assume that we
// can emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  const Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // For now we're using BLX etc on the assumption that we have v5t ops.
  if (!Subtarget->hasV5TOps()) return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    const Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLXr9 for darwin, BLX otherwise. This uses V5 ops.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc;
  if (isThumb)
    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
  else
    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
  // Explicitly adding the predicate here.
  MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                               TII.get(CallOpc)))
        .addExternalSymbol(TLI.getLibcallName(Call));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
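
// Example (illustrative): SelectSRem on an i32 reaches this path as
// RTLIB::SREM_I32, and the BL target comes from TLI.getLibcallName(), e.g.
// "__modsi3" with LLVM's default libcall names (the exact symbol is
// target-configured).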

bool ARMFastISel::SelectCall(const Instruction *I) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm or worry about intrinsics yet.
  if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;

  // Only handle global variable Callees that are direct calls.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel()))
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  const Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // For now we're using BLX etc on the assumption that we have v5t ops.
  // TODO: Maybe?
  if (!Subtarget->hasV5TOps()) return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    const Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT))
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call: tBLXi for Thumb and BL for ARM, using the r9 variants
  // on Darwin. This uses v5t ops.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc;
  if (isThumb)
    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
  else
    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
  // Explicitly adding the predicate here.
  MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                               TII.get(CallOpc)))
        .addGlobalAddress(GV, 0, 0);

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
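
// Illustrative example (a sketch, not part of this file's logic): a simple
// direct call such as
//
//   %r = call i32 @callee(i32 %a, i32 %b)
//
// passes every bail-out above (direct global callee, not vararg, no
// sret/byval/nest/inreg arguments, legal types), so SelectCall marshals
// %a and %b via ProcessCallArgs, emits a predicated BL (or tBLXi in Thumb
// mode) carrying @callee's address plus implicit uses of the argument
// registers, and lets FinishCall copy the return value into a fresh vreg
// for %r. Any call that trips one of the checks instead returns false and
// falls back to SelectionDAG instruction selection.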

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectSIToFP(I);
    case Instruction::FPToSI:
      return SelectFPToSI(I);
    case Instruction::FAdd:
      return SelectBinaryOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectSDiv(I);
    case Instruction::SRem:
      return SelectSRem(I);
    case Instruction::Call:
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    default: break;
  }
  return false;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-darwin.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // Darwin only for now, and not Thumb1.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}
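
// Illustrative usage (a sketch; the driver behavior described here is an
// assumption about the surrounding tools, not defined in this file):
// SelectionDAGISel calls createFastISel once per function, so this path
// can be exercised from a driver with something like
//
//   llc -mtriple=armv7-apple-darwin -O0 < test.ll
//
// since llc defaults to fast-isel at -O0 when the target provides it;
// passing -disable-arm-fast-isel makes the hook above return 0 and drops
// every function back to SelectionDAG instruction selection.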