ARMFastISel.cpp revision 47650ece374315ce4ff5e483f6165ae37752f230
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
EnableARMFastISel("arm-fast-isel",
                  cl::desc("Turn on experimental ARM fast-isel support"),
                  cl::init(false), cl::Hidden);

namespace {

class ARMFastISel : public FastISel {

  typedef struct AddrBase {
    unsigned Reg;
    unsigned FrameIndex;
  } AddrBase;

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
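    // These overrides of the generic FastEmitInst_* helpers exist so that
    // every instruction emitted through them can be routed through
    // AddOptionalDefs below, which appends the default predicate and
    // optional CC-def operands that most ARM instructions carry.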
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);

  #include "ARMGenFastISel.inc"

  // Instruction selection routines.
  private:
    virtual bool SelectLoad(const Instruction *I);
    virtual bool SelectStore(const Instruction *I);
    virtual bool SelectBranch(const Instruction *I);
    virtual bool SelectCmp(const Instruction *I);
    virtual bool SelectFPExt(const Instruction *I);
    virtual bool SelectFPTrunc(const Instruction *I);
    virtual bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    virtual bool SelectSIToFP(const Instruction *I);
    virtual bool SelectFPToSI(const Instruction *I);
    virtual bool SelectSDiv(const Instruction *I);
    virtual bool SelectSRem(const Instruction *I);
    virtual bool SelectCall(const Instruction *I);
    virtual bool SelectSelect(const Instruction *I);

  // Utility routines.
  private:
    bool isTypeLegal(const Type *Ty, EVT &VT);
    bool isLoadTypeLegal(const Type *Ty, EVT &VT);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, AddrBase Base, int Offset);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, AddrBase Base, int Offset);
    bool ARMComputeRegOffset(const Value *Obj, AddrBase &Base, int &Offset);
    void ARMSimplifyRegOffset(AddrBase &Base, int &Offset, EVT VT);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);

  // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<EVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(EVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

  // OptionalDef handling routines.
  private:
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

// If the machine is predicable go ahead and add the predicate operands, if
// it needs default CC operands add those.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate?
  if (TII.isPredicable(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT.getSimpleVT().SimpleTy == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT.getSimpleVT().SimpleTy == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    unsigned Opc = is64bit ? ARM::FCONSTD : ARM::FCONSTS;
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addFPImm(CFP));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  // For now 32-bit only.
  if (VT.getSimpleVT().SimpleTy != MVT::i32) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra reg and immediate are for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addReg(0).addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
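  // (A global's value here is its address, which is always i32 on ARM.)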
  if (VT.getSimpleVT().SimpleTy != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: No external globals for now.
  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) return 0;

  // TODO: Need more magic for ARM PIC.
  if (!isThumb && (RelocM == Reloc::PIC_)) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(GV->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(GV->getType());
  }

  // Grab index.
  unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
  unsigned Id = AFI->createConstPoolEntryUId();
  ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, Id,
                                                       ARMCP::CPValue, PCAdj);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  // Load value.
  MachineInstrBuilder MIB;
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb) {
    unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx);
    if (RelocM == Reloc::PIC_)
      MIB.addImm(Id);
  } else {
    // The extra reg and immediate are for addrmode2.
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                  DestReg)
          .addConstantPoolIndex(Idx)
          .addReg(0).addImm(0);
  }
  AddOptionalDefs(MIB);
  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  EVT VT;
  if (!isTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(const Type *Ty, EVT &VT) {
  VT = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (VT == MVT::Other || !VT.isSimple()) return false;

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(const Type *Ty, EVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the Reg+Offset to get to an object.
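// Looks through bitcasts, no-op inttoptr/ptrtoint casts, constant GEPs, and
// static allocas; anything else is materialized into a base register via
// getRegForValue as a last resort.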
bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, AddrBase &Base,
                                      int &Offset) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks; it's possible we haven't
    // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
      return false;
    Opcode = I->getOpcode();
    U = I;
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    break;
    case Instruction::BitCast: {
      // Look through bitcasts.
      return ARMComputeRegOffset(U->getOperand(0), Base, Offset);
    }
    case Instruction::IntToPtr: {
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeRegOffset(U->getOperand(0), Base, Offset);
      break;
    }
    case Instruction::PtrToInt: {
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeRegOffset(U->getOperand(0), Base, Offset);
      break;
    }
    case Instruction::GetElementPtr: {
      int SavedOffset = Offset;
      AddrBase SavedBase = Base;
      int TmpOffset = Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          SmallVector<const Value *, 4> Worklist;
          Worklist.push_back(Op);
          do {
            Op = Worklist.pop_back_val();
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
            } else if (0 && isa<AddOperator>(Op) &&
                       isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add with a constant operand. Fold the constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Add the other operand back to the work list.
              Worklist.push_back(cast<AddOperator>(Op)->getOperand(0));
            } else
              goto unsupported_gep;
          } while (!Worklist.empty());
        }
      }

      // Try to grab the base operand now.
      Offset = TmpOffset;
      if (ARMComputeRegOffset(U->getOperand(0), Base, Offset)) return true;

      // We failed, restore everything and try the other options.
      Offset = SavedOffset;
      Base = SavedBase;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      // TODO: Fix this to do intermediate loads, etc.
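      // Until then, only a direct (offset-zero) reference to a static alloca
      // is handled here.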
      if (Offset != 0) return false;

      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Base.Reg = ARM::SP;
        Base.FrameIndex = SI->second;
        return true;
      }
      // Don't handle dynamic allocas.
      assert(!FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Obj)) &&
             "Alloca should have been handled earlier!");
      return false;
    }
  }

  // Materialize the global variable's address into a reg which can
  // then be used later to load the variable.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
    if (Tmp == 0) return false;

    Base.Reg = Tmp;
    return true;
  }

  // Try to get this in a register if nothing else has worked.
  if (Base.Reg == 0) Base.Reg = getRegForValue(Obj);
  return Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyRegOffset(AddrBase &Base, int &Offset, EVT VT) {

  // Since the offset may be too large for the load instruction
  // get the reg+offset into a register.
  if (Base.Reg != ARM::SP && Offset != 0) {
    ARMCC::CondCodes Pred = ARMCC::AL;
    unsigned PredReg = 0;

    TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
                                        ARM::GPRRegisterClass;
    unsigned BaseReg = createResultReg(RC);

    if (!isThumb)
      emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              BaseReg, Base.Reg, Offset, Pred, PredReg,
                              static_cast<const ARMBaseInstrInfo&>(TII));
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                             BaseReg, Base.Reg, Offset, Pred, PredReg,
                             static_cast<const ARMBaseInstrInfo&>(TII));
    }
    Offset = 0;
    Base.Reg = BaseReg;
  }
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg,
                              AddrBase Base, int Offset) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  TargetRegisterClass *RC;
  bool isFloat = false;
  switch (VT.getSimpleVT().SimpleTy) {
    default:
      // This is mostly going to be Neon/vector support.
      return false;
    case MVT::i16:
      Opc = isThumb ? ARM::t2LDRHi8 : ARM::LDRH;
      RC = ARM::GPRRegisterClass;
      VT = MVT::i32;
      break;
    case MVT::i8:
      Opc = isThumb ? ARM::t2LDRBi8 : ARM::LDRB;
      RC = ARM::GPRRegisterClass;
      VT = MVT::i32;
      break;
    case MVT::i32:
      Opc = isThumb ? ARM::t2LDRi8 : ARM::LDR;
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::f32:
      Opc = ARM::VLDRS;
      RC = TLI.getRegClassFor(VT);
      isFloat = true;
      break;
    case MVT::f64:
      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      isFloat = true;
      break;
  }

  ResultReg = createResultReg(RC);

  // For now with the additions above the offset should be zero - thus we
  // can always fit into an i8.
  assert((Base.Reg == ARM::SP || Offset == 0) &&
         "Offset not zero and not a stack load!");

  if (Base.Reg == ARM::SP && Offset == 0)
    TII.loadRegFromStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
                             ResultReg, Base.FrameIndex, RC,
                             TM.getRegisterInfo());
  // The thumb and floating point instructions both take 2 operands, ARM takes
  // another register.
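  // (ARM addrmode2 is base reg + offset reg + immediate, hence the extra
  // zero register operand in the final case below.)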
  else if (isFloat || isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addReg(Base.Reg).addImm(Offset));
  else
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addReg(Base.Reg).addReg(0).addImm(Offset));
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Verify we have a legal type before going any further.
  EVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // Our register and offset with innocuous defaults.
  AddrBase Base = { 0, 0 };
  int Offset = 0;

  // See if we can handle this as Reg + Offset
  if (!ARMComputeRegOffset(I->getOperand(0), Base, Offset))
    return false;

  ARMSimplifyRegOffset(Base, Offset, VT);

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Base, Offset)) return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg,
                               AddrBase Base, int Offset) {
  unsigned StrOpc;
  bool isFloat = false;
  // VT is set here only for use in the alloca stores below - those are promoted
  // to reg size always.
  switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
    case MVT::i1:
    case MVT::i8:
      VT = MVT::i32;
      StrOpc = isThumb ? ARM::t2STRBi8 : ARM::STRB;
      break;
    case MVT::i16:
      VT = MVT::i32;
      StrOpc = isThumb ? ARM::t2STRHi8 : ARM::STRH;
      break;
    case MVT::i32:
      StrOpc = isThumb ? ARM::t2STRi8 : ARM::STR;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRS;
      isFloat = true;
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRD;
      isFloat = true;
      break;
  }

  if (Base.Reg == ARM::SP && Offset == 0)
    TII.storeRegToStackSlot(*FuncInfo.MBB, *FuncInfo.InsertPt,
                            SrcReg, true /*isKill*/, Base.FrameIndex,
                            TLI.getRegClassFor(VT), TM.getRegisterInfo());
  // The thumb addressing mode has operands swapped from the arm addressing
  // mode, the floating point one only has two operands.
  else if (isFloat || isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(StrOpc))
                    .addReg(SrcReg).addReg(Base.Reg).addImm(Offset));
  else
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(StrOpc))
                    .addReg(SrcReg).addReg(Base.Reg).addReg(0).addImm(Offset));

  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Yay type legalization
  EVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0)
    return false;

  // Our register and offset with innocuous defaults.
  AddrBase Base = { 0, 0 };
  int Offset = 0;

  // See if we can handle this as Reg + Offset
  if (!ARMComputeRegOffset(I->getOperand(1), Base, Offset))
    return false;

  ARMSimplifyRegOffset(Base, Offset, VT);

  if (!ARMEmitStore(VT, SrcReg, Base, Offset)) return false;

  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
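    // (FCMP_ONE and FCMP_UEQ have no single ARM condition code, so they
    // fall into the unhandled default below.)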
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      assert(false && "Unhandled CmpInst::Predicate!");
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.
  // TODO: Try to avoid the re-computation in some places.
  unsigned CondReg = getRegForValue(BI->getCondition());
  if (CondReg == 0) return false;

  // Re-set the flags just in case.
  unsigned CmpOpc = isThumb ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(1));

  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  EVT VT;
  const Type *Ty = CI->getOperand(0)->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned CmpOpc;
  unsigned CondReg;
  switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      CmpOpc = ARM::VCMPES;
      CondReg = ARM::FPSCR;
      break;
    case MVT::f64:
      CmpOpc = ARM::VCMPED;
      CondReg = ARM::FPSCR;
      break;
    case MVT::i32:
      CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
      CondReg = ARM::CPSR;
      break;
  }

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  unsigned Arg1 = getRegForValue(CI->getOperand(0));
  if (Arg1 == 0) return false;

  unsigned Arg2 = getRegForValue(CI->getOperand(1));
  if (Arg2 == 0) return false;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(Arg1).addReg(Arg2));

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (isFloat)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass
                                    : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero
    = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  EVT DstVT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, Op);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  EVT DstVT;
  const Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  const Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;

  // f64->s32 or f32->s32 both need an intermediate f32 reg.
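  // (The conversion result lives in an S-register as a bit pattern; it is
  // moved over to a GPR with ARMMoveToIntReg below.)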
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT.getSimpleVT().SimpleTy != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  unsigned CmpOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(1));
  unsigned ResultReg = createResultReg(RC);
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCr : ARM::MOVCCr;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op1Reg).addReg(Op2Reg)
    .addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectSDiv(const Instruction *I) {
  EVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  EVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
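  // Only FADD, FSUB, and FMUL are handled below; anything else bails out so
  // SelectionDAG can pick it up.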
  const Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned Opc;
  bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64 ||
                 VT.getSimpleVT().SimpleTy == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::Fast:
      // Use target triple & subtarget features to do actual dispatch.
      if (Subtarget->isAAPCS_ABI()) {
        if (Subtarget->hasVFP2() &&
            FloatABIType == FloatABI::Hard)
          return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
        else
          return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
      } else
        return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
    case CallingConv::ARM_AAPCS_VFP:
      return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
    case CallingConv::ARM_AAPCS:
      return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    case CallingConv::ARM_APCS:
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<EVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    EVT ArgVT = ArgVTs[VA.getValNo()];

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      default:
        // TODO: Handle arg promotion.
        return false;
    }

    // Now copy/store arg to correct locations.
    // TODO: We need custom lowering for f64 args.
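    // Only arguments that were assigned to registers are handled; anything
    // that would have to be stored to the stack bails out below.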
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else {
      // Need to store
      return false;
    }
  }

  return true;
}

bool ARMFastISel::FinishCall(EVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT.getSimpleVT().SimpleTy != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT.getSimpleVT().SimpleTy == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      // TODO: Are the copies necessary?
      TargetRegisterClass *CopyRC = TLI.getRegClassFor(MVT::i32);
      unsigned Copy1 = createResultReg(CopyRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              Copy1).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      unsigned Copy2 = createResultReg(CopyRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              Copy2).addReg(RVLocs[1].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(Copy1).addReg(Copy2));

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

// A quick function that will emit a call for a named libcall in F with the
// vector of passed arguments for the Instruction in I. We can assume that we
// can emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
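  // The return type must be void or a single legal register-sized type.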
  const Type *RetTy = I->getType();
  EVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // For now we're using BLX etc on the assumption that we have v5t ops.
  if (!Subtarget->hasV5TOps()) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<EVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    const Type *ArgTy = Op->getType();
    EVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLXr9 for darwin, BLX otherwise. This uses V5 ops.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc;
  if (isThumb)
    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
  else
    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
        .addExternalSymbol(TLI.getLibcallName(Call));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm or worry about intrinsics yet.
  if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;

  // Only handle global variable Callees that are direct calls.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel()))
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();
  // TODO: Avoid some calling conventions?
  if (CC != CallingConv::C) {
    // errs() << "Can't handle calling convention: " << CC << "\n";
    return false;
  }

  // Let SDISel handle vararg functions.
  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
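  // As in ARMEmitLibcall, the return type must be void or a single legal
  // register-sized type.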
  const Type *RetTy = I->getType();
  EVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // For now we're using BLX etc on the assumption that we have v5t ops.
  // TODO: Maybe?
  if (!Subtarget->hasV5TOps()) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<EVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);

    if (Arg == 0)
      return false;
    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    const Type *ArgTy = (*i)->getType();
    EVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT))
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLXr9 for darwin, BLX otherwise. This uses V5 ops.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc;
  if (isThumb)
    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
  else
    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
        .addGlobalAddress(GV, 0, 0);

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  // No Thumb-1 for now.
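  // Thumb functions are only selected here when Thumb-2 encodings are
  // available; otherwise we fall back to SelectionDAG.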
  if (isThumb && !AFI->isThumb2Function()) return false;

  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectSIToFP(I);
    case Instruction::FPToSI:
      return SelectFPToSI(I);
    case Instruction::FAdd:
      return SelectBinaryOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectSDiv(I);
    case Instruction::SRem:
      return SelectSRem(I);
    case Instruction::Call:
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    default: break;
  }
  return false;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-darwin.
    const TargetMachine &TM = funcInfo.MF->getTarget();
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetDarwin() && EnableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}