// ARMFastISel.cpp, revision 61d69da051f5e45ebca4b78f3cec21370de25061

//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

namespace {

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
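    // These overrides re-implement the FastEmitInst_* helpers from the
    // target-independent FastISel so that every instruction emitted here can
    // be routed through AddOptionalDefs, which appends the ARM predicate
    // (and, where present, the optional CC-def) operands.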
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectSIToFP(const Instruction *I);
    bool SelectFPToSI(const Instruction *I);
    bool SelectSDiv(const Instruction *I);
    bool SelectSRem(const Instruction *I);
    bool SelectCall(const Instruction *I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(const Type *Ty, EVT &VT);
    bool isLoadTypeLegal(const Type *Ty, EVT &VT);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, unsigned Base, int Offset);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, unsigned Base, int Offset);
    bool ARMComputeRegOffset(const Value *Obj, unsigned &Base, int &Offset);
    void ARMSimplifyRegOffset(unsigned &Base, int &Offset, EVT VT);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);

    // Call handling routines.
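    // ProcessCallArgs marshals arguments into registers or stack slots
    // according to the CCAssignFn chosen by CCAssignFnForCall and opens the
    // call sequence; FinishCall closes it and copies results out of the
    // return registers.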
  private:
    bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                        unsigned &ResultReg);
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<EVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(EVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

// If the machine is predicable go ahead and add the predicate operands, if
// it needs default CC operands add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate?
  if (TII.isPredicable(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate? Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
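  // (AddDefaultT1CC adds a CPSR def operand; AddDefaultCC adds the default,
  // unset cc_out operand.)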
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT.getSimpleVT().SimpleTy == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT.getSimpleVT().SimpleTy == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    unsigned Opc = is64bit ? ARM::FCONSTD : ARM::FCONSTS;
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addFPImm(CFP));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  // For now 32-bit only.
  if (VT.getSimpleVT().SimpleTy != MVT::i32) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra reg and immediate are for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT.getSimpleVT().SimpleTy != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: No external globals for now.
  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) return 0;

  // TODO: Need more magic for ARM PIC.
  if (!isThumb && (RelocM == Reloc::PIC_)) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(GV->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(GV->getType());
  }

  // Grab index.
  unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
  unsigned Id = AFI->createConstPoolEntryUId();
  ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, Id,
                                                       ARMCP::CPValue, PCAdj);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  // Load value.
  MachineInstrBuilder MIB;
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb) {
    unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx);
    if (RelocM == Reloc::PIC_)
      MIB.addImm(Id);
  } else {
    // The extra reg and immediate are for addrmode2.
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                  DestReg)
          .addConstantPoolIndex(Idx)
          .addReg(0).addImm(0);
  }
  AddOptionalDefs(MIB);
  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  EVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(const Type *Ty, EVT &VT) {
  VT = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (VT == MVT::Other || !VT.isSimple()) return false;

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(const Type *Ty, EVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the Reg+Offset to get to an object.
bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Base,
                                      int &Offset) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks; it's possible we haven't
    // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
      return false;
    Opcode = I->getOpcode();
    U = I;
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast: {
    // Look through bitcasts.
    return ARMComputeRegOffset(U->getOperand(0), Base, Offset);
  }
  case Instruction::IntToPtr: {
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return ARMComputeRegOffset(U->getOperand(0), Base, Offset);
    break;
  }
  case Instruction::PtrToInt: {
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return ARMComputeRegOffset(U->getOperand(0), Base, Offset);
    break;
  }
  case Instruction::GetElementPtr: {
    int SavedOffset = Offset;
    unsigned SavedBase = Base;
    int TmpOffset = Offset;

    // Iterate through the GEP folding the constants into offsets where
    // we can.
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      const Value *Op = *i;
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
        SmallVector<const Value *, 4> Worklist;
        Worklist.push_back(Op);
        do {
          Op = Worklist.pop_back_val();
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
          } else if (isa<AddOperator>(Op) &&
                     isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
            // An add with a constant operand. Fold the constant.
            ConstantInt *CI =
              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Add the other operand back to the work list.
            Worklist.push_back(cast<AddOperator>(Op)->getOperand(0));
          } else
            goto unsupported_gep;
        } while (!Worklist.empty());
      }
    }

    // Try to grab the base operand now.
    Offset = TmpOffset;
    if (ARMComputeRegOffset(U->getOperand(0), Base, Offset)) return true;

    // We failed, restore everything and try the other options.
    Offset = SavedOffset;
    Base = SavedBase;

  unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    unsigned Reg = TargetMaterializeAlloca(AI);

    if (Reg == 0) return false;

    Base = Reg;
    return true;
  }
  }

  // Materialize the global variable's address into a reg which can
  // then be used later to load the variable.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
    if (Tmp == 0) return false;

    Base = Tmp;
    return true;
  }

  // Try to get this in a register if nothing else has worked.
  if (Base == 0) Base = getRegForValue(Obj);
  return Base != 0;
}

void ARMFastISel::ARMSimplifyRegOffset(unsigned &Base, int &Offset, EVT VT) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    assert(false && "Unhandled load/store type!");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // Integer loads/stores handle 12-bit offsets.
    needsLowering = ((Offset & 0xfff) != Offset);
    break;
  case MVT::f32:
  case MVT::f64:
    // Floating point operands handle 8-bit offsets.
    needsLowering = ((Offset & 0xff) != Offset);
    break;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
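  // For example, an i32 access at offset 4096 doesn't fit the 12-bit
  // immediate field, so the base+offset sum is folded into a fresh base
  // register and the offset reset to 0 below.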
  if (needsLowering) {
    ARMCC::CondCodes Pred = ARMCC::AL;
    unsigned PredReg = 0;

    TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
                                        ARM::GPRRegisterClass;
    unsigned BaseReg = createResultReg(RC);

    if (!isThumb)
      emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              BaseReg, Base, Offset, Pred, PredReg,
                              static_cast<const ARMBaseInstrInfo&>(TII));
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                             BaseReg, Base, Offset, Pred, PredReg,
                             static_cast<const ARMBaseInstrInfo&>(TII));
    }
    Offset = 0;
    Base = BaseReg;
  }
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg,
                              unsigned Base, int Offset) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  TargetRegisterClass *RC;
  bool isFloat = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // This is mostly going to be Neon/vector support.
    return false;
  case MVT::i16:
    Opc = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i8:
    Opc = isThumb ? ARM::t2LDRBi12 : ARM::LDRBi12;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i32:
    Opc = isThumb ? ARM::t2LDRi12 : ARM::LDRi12;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::f32:
    Opc = ARM::VLDRS;
    RC = TLI.getRegClassFor(VT);
    isFloat = true;
    break;
  case MVT::f64:
    Opc = ARM::VLDRD;
    RC = TLI.getRegClassFor(VT);
    isFloat = true;
    break;
  }

  ResultReg = createResultReg(RC);

  ARMSimplifyRegOffset(Base, Offset, VT);

  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (isFloat)
    Offset /= 4;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Base).addImm(Offset));
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Verify we have a legal type before going any further.
  EVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // Our register and offset with innocuous defaults.
  unsigned Base = 0;
  int Offset = 0;

  // See if we can handle this as Reg + Offset
  if (!ARMComputeRegOffset(I->getOperand(0), Base, Offset))
    return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Base, Offset)) return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg,
                               unsigned Base, int Offset) {
  unsigned StrOpc;
  bool isFloat = false;
  bool needReg0Op = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
    StrOpc = isThumb ? ARM::t2STRBi12 : ARM::STRBi12;
    break;
  case MVT::i16:
    StrOpc = isThumb ? ARM::t2STRHi12 : ARM::STRH;
    needReg0Op = true;
    break;
  case MVT::i32:
    StrOpc = isThumb ? ARM::t2STRi12 : ARM::STRi12;
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    StrOpc = ARM::VSTRS;
    isFloat = true;
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    StrOpc = ARM::VSTRD;
    isFloat = true;
    break;
  }

  ARMSimplifyRegOffset(Base, Offset, VT);

  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
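  // (addrmode5 immediates are in words, so a byte offset of 8 is encoded
  // as 2.)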
  if (isFloat)
    Offset /= 4;

  // FIXME: The 'needReg0Op' bit goes away once STRH is converted to
  // not use the mega-addrmode stuff.
  if (!needReg0Op)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(StrOpc))
                    .addReg(SrcReg).addReg(Base).addImm(Offset));
  else
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(StrOpc))
                    .addReg(SrcReg).addReg(Base).addReg(0).addImm(Offset));

  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Yay type legalization
  EVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0)
    return false;

  // Our register and offset with innocuous defaults.
  unsigned Base = 0;
  int Offset = 0;

  // See if we can handle this as Reg + Offset
  if (!ARMComputeRegOffset(I->getOperand(1), Base, Offset))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Base, Offset)) return false;

  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
  // Needs two compares...
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UEQ:
  default:
    assert(false && "Unhandled CmpInst::Predicate!");
    return ARMCC::AL;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    return ARMCC::EQ;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    return ARMCC::GT;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    return ARMCC::GE;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    return ARMCC::HI;
  case CmpInst::FCMP_OLT:
    return ARMCC::MI;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    return ARMCC::LS;
  case CmpInst::FCMP_ORD:
    return ARMCC::VC;
  case CmpInst::FCMP_UNO:
    return ARMCC::VS;
  case CmpInst::FCMP_UGE:
    return ARMCC::PL;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    return ARMCC::LT;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    return ARMCC::LE;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    return ARMCC::NE;
  case CmpInst::ICMP_UGE:
    return ARMCC::HS;
  case CmpInst::ICMP_ULT:
    return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  // TODO: Factor this out.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
      EVT VT;
      const Type *Ty = CI->getOperand(0)->getType();
      if (!isTypeLegal(Ty, VT))
        return false;

      bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
      if (isFloat && !Subtarget->hasVFP2())
        return false;

      unsigned CmpOpc;
      unsigned CondReg;
      switch (VT.getSimpleVT().SimpleTy) {
      default: return false;
      // TODO: Verify compares.
      case MVT::f32:
        CmpOpc = ARM::VCMPES;
        CondReg = ARM::FPSCR;
        break;
      case MVT::f64:
        CmpOpc = ARM::VCMPED;
        CondReg = ARM::FPSCR;
        break;
      case MVT::i32:
        CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
        CondReg = ARM::CPSR;
        break;
      }

      // Get the compare predicate.
      ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      unsigned Arg1 = getRegForValue(CI->getOperand(0));
      if (Arg1 == 0) return false;

      unsigned Arg2 = getRegForValue(CI->getOperand(1));
      if (Arg2 == 0) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(CmpOpc))
                      .addReg(Arg1).addReg(Arg2));

      // For floating point we need to move the result to a comparison register
      // that we can then use for branches.
      if (isFloat)
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::FMSTAT)));

      unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // Re-set the flags just in case.
  unsigned CmpOpc = isThumb ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  EVT VT;
  const Type *Ty = CI->getOperand(0)->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned CmpOpc;
  unsigned CondReg;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  // TODO: Verify compares.
  case MVT::f32:
    CmpOpc = ARM::VCMPES;
    CondReg = ARM::FPSCR;
    break;
  case MVT::f64:
    CmpOpc = ARM::VCMPED;
    CondReg = ARM::FPSCR;
    break;
  case MVT::i32:
    CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
    CondReg = ARM::CPSR;
    break;
  }

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  unsigned Arg1 = getRegForValue(CI->getOperand(0));
  if (Arg1 == 0) return false;

  unsigned Arg2 = getRegForValue(CI->getOperand(1));
  if (Arg2 == 0) return false;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(Arg1).addReg(Arg2));

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (isFloat)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass
                                    : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero
    = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  EVT DstVT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, Op);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  EVT DstVT;
  const Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  const Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;

  // f64->s32 or f32->s32 both need an intermediate f32 reg.
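  // The VTOSIZ* result is left in an f32/SPR register; it is moved to an
  // integer register by ARMMoveToIntReg below.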
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT.getSimpleVT().SimpleTy != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  unsigned CmpOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(1));
  unsigned ResultReg = createResultReg(RC);
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCr : ARM::MOVCCr;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op1Reg).addReg(Op2Reg)
    .addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectSDiv(const Instruction *I) {
  EVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  EVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
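  // Only ISD::FADD/FSUB/FMUL are routed here (see TargetSelectInstruction
  // below); everything else falls out through the default case.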
  const Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned Opc;
  bool is64bit = VT.getSimpleVT().SimpleTy == MVT::f64 ||
                 VT.getSimpleVT().SimpleTy == MVT::i64;
  switch (ISDOpcode) {
  default: return false;
  case ISD::FADD:
    Opc = is64bit ? ARM::VADDD : ARM::VADDS;
    break;
  case ISD::FSUB:
    Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
    break;
  case ISD::FMUL:
    Opc = is64bit ? ARM::VMULD : ARM::VMULS;
    break;
  }
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
                                 EVT SrcVT, unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                           Src, /*TODO: Kill=*/false);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<EVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
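  // Each CCValAssign describes where one argument lives: a physical register
  // (possibly after promotion) or a stack slot relative to SP.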
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    EVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON parameters yet.
    if (VA.getLocVT().isVector() && VA.getLocVT().getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt: {
      bool Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      // (The self-assignment is here to silence unused-variable warnings in
      // -Asserts builds.)
      assert(Emitted && "Failed to emit a sext!"); Emitted=Emitted;
      Emitted = true;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::ZExt: {
      bool Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a zext!"); Emitted=Emitted;
      Emitted = true;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::AExt: {
      bool Emitted = FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                 Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                 Arg, ArgVT, Arg);

      assert(Emitted && "Failed to emit an aext!"); Emitted=Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::BCvt: {
      unsigned BC = FastEmit_r(ArgVT.getSimpleVT(),
                               VA.getLocVT().getSimpleVT(),
                               ISD::BIT_CONVERT, Arg, /*TODO: Kill=*/false);
      assert(BC != 0 && "Failed to emit a bitcast!");
      Arg = BC;
      ArgVT = VA.getLocVT();
      break;
    }
    default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      unsigned Base = ARM::SP;
      int Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Base, Offset)) return false;
    }
  }
  return true;
}

bool ARMFastISel::FinishCall(EVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
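  // An f64 return comes back in a GPR pair and is reassembled with VMOVDRR;
  // anything else is a single-register copy.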
  if (RetVT.getSimpleVT().SimpleTy != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT.getSimpleVT().SimpleTy == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;
    // TODO: For now, don't try to handle cases where getLocInfo()
    // says Full but the types don't match.
    if (VA.getValVT() != TLI.getValueType(RV->getType()))
      return false;

    // Make the copy.
    unsigned SrcReg = Reg + VA.getValNo();
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb ? ARM::tBX_RET : ARM::BX_RET;
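  // tBX_RET/BX_RET return through LR; the predicate operands are added by
  // AddOptionalDefs.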
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

// A quick function that will emit a call for a named libcall in F with the
// vector of passed arguments for the Instruction in I. We can assume that we
// can emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  const Type *RetTy = I->getType();
  EVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // For now we're using BLX etc on the assumption that we have v5t ops.
  if (!Subtarget->hasV5TOps()) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<EVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    const Type *ArgTy = Op->getType();
    EVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLXr9 for darwin, BLX otherwise. This uses V5 ops.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc;
  if (isThumb)
    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
  else
    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
        .addExternalSymbol(TLI.getLibcallName(Call));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm or worry about intrinsics yet.
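  // (Intrinsic calls also arrive here as CallInsts; rejecting them makes
  // fast-isel fall back to SelectionDAG for them.)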
  if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;

  // Only handle global variable Callees that are direct calls.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel()))
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  const Type *RetTy = I->getType();
  EVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // For now we're using BLX etc on the assumption that we have v5t ops.
  // TODO: Maybe?
  if (!Subtarget->hasV5TOps()) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<EVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);

    if (Arg == 0)
      return false;
    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    const Type *ArgTy = (*i)->getType();
    EVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT))
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLXr9 for darwin, BLX otherwise. This uses V5 ops.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc;
  if (isThumb)
    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
  else
    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
        .addGlobalAddress(GV, 0, 0);

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
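  // Every physreg def on the call is marked dead except the registers in
  // UsedRegs, i.e. those actually carrying return values.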
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {

  switch (I->getOpcode()) {
  case Instruction::Load:
    return SelectLoad(I);
  case Instruction::Store:
    return SelectStore(I);
  case Instruction::Br:
    return SelectBranch(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return SelectCmp(I);
  case Instruction::FPExt:
    return SelectFPExt(I);
  case Instruction::FPTrunc:
    return SelectFPTrunc(I);
  case Instruction::SIToFP:
    return SelectSIToFP(I);
  case Instruction::FPToSI:
    return SelectFPToSI(I);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::FSub:
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectSDiv(I);
  case Instruction::SRem:
    return SelectSRem(I);
  case Instruction::Call:
    return SelectCall(I);
  case Instruction::Select:
    return SelectSelect(I);
  case Instruction::Ret:
    return SelectRet(I);
  default: break;
  }
  return false;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-darwin.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // Darwin only for now (and not Thumb1).
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}