ARMFastISel.cpp revision d0c82a683e965f326e36a2bcaa85c00e917f8282
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

namespace {

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
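    // These overrides mirror the FastEmitInst_* helpers in FastISel.cpp; the
    // ARM versions exist so that every instruction emitted here is routed
    // through AddOptionalDefs and picks up its predicate/cc_out operands.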
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectSIToFP(const Instruction *I);
    bool SelectFPToSI(const Instruction *I);
    bool SelectSDiv(const Instruction *I);
    bool SelectSRem(const Instruction *I);
    bool SelectCall(const Instruction *I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(const Type *Ty, MVT &VT);
    bool isLoadTypeLegal(const Type *Ty, MVT &VT);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, unsigned Base, int Offset);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, unsigned Base, int Offset);
    bool ARMComputeRegOffset(const Value *Obj, unsigned &Base, int &Offset);
    void ARMSimplifyRegOffset(unsigned &Base, int &Offset, EVT VT);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);

    // Call handling routines.
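    // Roughly: ProcessCallArgs issues CALLSEQ_START and moves/stores the
    // arguments into their assigned locations, the caller emits the call
    // instruction itself, and FinishCall issues CALLSEQ_END and copies any
    // return values back out.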
  private:
    bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                        unsigned &ResultReg);
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

// If the machine instruction is predicable, add the predicate operands; if
// it needs default CC operands, add those instead.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate?
  if (TII.isPredicable(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR.  All other OptionalDefs in ARM are the CCR register.
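  // For example, ARM::ANDri carries an optional cc_out operand: left as the
  // zero register it stays a plain AND, while binding it to CPSR would make
  // it the flag-setting ANDS form.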
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    unsigned Opc = is64bit ? ARM::FCONSTD : ARM::FCONSTS;
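    // FCONSTS/FCONSTD can encode only a small set of FP immediates (e.g.
    // 1.0, 2.0) directly in the instruction, which is what isFPImmLegal
    // checked above; everything else takes the constant-pool path below.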
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addFPImm(CFP));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getSExtValue())) {
    unsigned Opc = isThumb ? ARM::t2MOVi16 : ARM::MOVi16;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), DestReg)
                    .addImm(CI->getSExtValue()));
    return DestReg;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: No external globals for now.
  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) return 0;

  // TODO: Need more magic for ARM PIC.
  if (!isThumb && (RelocM == Reloc::PIC_)) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(GV->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(GV->getType());
  }

  // Grab index.
  unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
  unsigned Id = AFI->createConstPoolEntryUId();
  ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, Id,
                                                       ARMCP::CPValue, PCAdj);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  // Load value.
  MachineInstrBuilder MIB;
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb) {
    unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
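    // t2LDRpci_pic is the PIC flavor; it also takes the pc-label id added
    // below so a later pass can fold in the pc-relative add.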
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx);
    if (RelocM == Reloc::PIC_)
      MIB.addImm(Id);
  } else {
    // The extra immediate is for addrmode2.
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                  DestReg)
          .addConstantPoolIndex(Idx)
          .addImm(0);
  }
  AddOptionalDefs(MIB);
  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(const Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(const Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the Reg+Offset to get to an object.
bool ARMFastISel::ARMComputeRegOffset(const Value *Obj, unsigned &Base,
                                      int &Offset) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks; it's possible we haven't
    // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
      return false;
    Opcode = I->getOpcode();
    U = I;
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast: {
    // Look through bitcasts.
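    // e.g. for "%p = bitcast i8* %q to i32*" the address of %p is just the
    // address of %q.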
    return ARMComputeRegOffset(U->getOperand(0), Base, Offset);
  }
  case Instruction::IntToPtr: {
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return ARMComputeRegOffset(U->getOperand(0), Base, Offset);
    break;
  }
  case Instruction::PtrToInt: {
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return ARMComputeRegOffset(U->getOperand(0), Base, Offset);
    break;
  }
  case Instruction::GetElementPtr: {
    int SavedOffset = Offset;
    unsigned SavedBase = Base;
    int TmpOffset = Offset;

    // Iterate through the GEP folding the constants into offsets where
    // we can.
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      const Value *Op = *i;
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
        SmallVector<const Value *, 4> Worklist;
        Worklist.push_back(Op);
        do {
          Op = Worklist.pop_back_val();
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
          } else if (isa<AddOperator>(Op) &&
                     isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
            // An add with a constant operand. Fold the constant.
            ConstantInt *CI =
              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Add the other operand back to the work list.
            Worklist.push_back(cast<AddOperator>(Op)->getOperand(0));
          } else
            goto unsupported_gep;
        } while (!Worklist.empty());
      }
    }

    // Try to grab the base operand now.
    Offset = TmpOffset;
    if (ARMComputeRegOffset(U->getOperand(0), Base, Offset)) return true;

    // We failed, restore everything and try the other options.
    Offset = SavedOffset;
    Base = SavedBase;

  unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    unsigned Reg = TargetMaterializeAlloca(AI);

    if (Reg == 0) return false;

    Base = Reg;
    return true;
  }
  }

  // Materialize the global variable's address into a reg which can
  // then be used later to load the variable.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
    if (Tmp == 0) return false;

    Base = Tmp;
    return true;
  }

  // Try to get this in a register if nothing else has worked.
  if (Base == 0) Base = getRegForValue(Obj);
  return Base != 0;
}

void ARMFastISel::ARMSimplifyRegOffset(unsigned &Base, int &Offset, EVT VT) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
    default:
      assert(false && "Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      // Integer loads/stores handle 12-bit offsets.
      needsLowering = ((Offset & 0xfff) != Offset);
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
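      // Note this check runs before the divide-by-4 scaling that
      // ARMEmitLoad/ARMEmitStore apply for addrmode5, so it is conservative.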
      needsLowering = ((Offset & 0xff) != Offset);
      break;
  }

  // Since the offset is too large for the load/store instruction, get the
  // reg+offset into a register.
  if (needsLowering) {
    ARMCC::CondCodes Pred = ARMCC::AL;
    unsigned PredReg = 0;

    TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
                                        ARM::GPRRegisterClass;
    unsigned BaseReg = createResultReg(RC);

    if (!isThumb)
      emitARMRegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              BaseReg, Base, Offset, Pred, PredReg,
                              static_cast<const ARMBaseInstrInfo&>(TII));
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                             BaseReg, Base, Offset, Pred, PredReg,
                             static_cast<const ARMBaseInstrInfo&>(TII));
    }
    Offset = 0;
    Base = BaseReg;
  }
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg,
                              unsigned Base, int Offset) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  TargetRegisterClass *RC;
  bool isFloat = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // This is mostly going to be Neon/vector support.
    return false;
  case MVT::i16:
    Opc = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i8:
    Opc = isThumb ? ARM::t2LDRBi12 : ARM::LDRBi12;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i32:
    Opc = isThumb ? ARM::t2LDRi12 : ARM::LDRi12;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::f32:
    Opc = ARM::VLDRS;
    RC = TLI.getRegClassFor(VT);
    isFloat = true;
    break;
  case MVT::f64:
    Opc = ARM::VLDRD;
    RC = TLI.getRegClassFor(VT);
    isFloat = true;
    break;
  }

  ResultReg = createResultReg(RC);

  ARMSimplifyRegOffset(Base, Offset, VT);

  // The addrmode5 operand expects the offset divided by 4; the selection DAG
  // does the same division (and multiplies it back later), so do it here too.
  if (isFloat)
    Offset /= 4;

  // LDRH needs an additional operand.
  if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addReg(Base).addReg(0).addImm(Offset));
  else
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addReg(Base).addImm(Offset));
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // Our register and offset with innocuous defaults.
  unsigned Base = 0;
  int Offset = 0;

  // See if we can handle this as Reg + Offset
  if (!ARMComputeRegOffset(I->getOperand(0), Base, Offset))
    return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Base, Offset)) return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg,
                               unsigned Base, int Offset) {
  unsigned StrOpc;
  bool isFloat = false;
  bool needReg0Op = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1: {
    unsigned Res = createResultReg(isThumb ? ARM::tGPRRegisterClass :
                                             ARM::GPRRegisterClass);
    unsigned Opc = isThumb ? ARM::t2ANDri : ARM::ANDri;
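    // Mask the value down to a single bit; the store itself is then just an
    // i8-sized store (note the fallthrough into the i8 case below).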
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), Res)
                    .addReg(SrcReg).addImm(1));
    SrcReg = Res;
  } // Fallthrough here.
  case MVT::i8:
    StrOpc = isThumb ? ARM::t2STRBi12 : ARM::STRBi12;
    break;
  case MVT::i16:
    StrOpc = isThumb ? ARM::t2STRHi12 : ARM::STRH;
    needReg0Op = true;
    break;
  case MVT::i32:
    StrOpc = isThumb ? ARM::t2STRi12 : ARM::STRi12;
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    StrOpc = ARM::VSTRS;
    isFloat = true;
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    StrOpc = ARM::VSTRD;
    isFloat = true;
    break;
  }

  ARMSimplifyRegOffset(Base, Offset, VT);

  // The addrmode5 operand expects the offset divided by 4; the selection DAG
  // does the same division (and multiplies it back later), so do it here too.
  if (isFloat)
    Offset /= 4;

  // FIXME: The 'needReg0Op' bit goes away once STRH is converted to
  // not use the mega-addrmode stuff.
  if (!needReg0Op)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(StrOpc))
                    .addReg(SrcReg).addReg(Base).addImm(Offset));
  else
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(StrOpc))
                    .addReg(SrcReg).addReg(Base).addReg(0).addImm(Offset));

  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Yay type legalization
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0)
    return false;

  // Our register and offset with innocuous defaults.
  unsigned Base = 0;
  int Offset = 0;

  // See if we can handle this as Reg + Offset
  if (!ARMComputeRegOffset(I->getOperand(1), Base, Offset))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Base, Offset)) return false;

  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UEQ:
  default:
    // AL is our "false" for now. The other two need more compares.
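    // (FCMP_ONE is "ordered and not equal" and FCMP_UEQ is "unordered or
    // equal"; neither maps onto a single ARM condition code.)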
    return ARMCC::AL;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    return ARMCC::EQ;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    return ARMCC::GT;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    return ARMCC::GE;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    return ARMCC::HI;
  case CmpInst::FCMP_OLT:
    return ARMCC::MI;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    return ARMCC::LS;
  case CmpInst::FCMP_ORD:
    return ARMCC::VC;
  case CmpInst::FCMP_UNO:
    return ARMCC::VS;
  case CmpInst::FCMP_UGE:
    return ARMCC::PL;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    return ARMCC::LT;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    return ARMCC::LE;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    return ARMCC::NE;
  case CmpInst::ICMP_UGE:
    return ARMCC::HS;
  case CmpInst::ICMP_ULT:
    return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  // TODO: Factor this out.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
      MVT VT;
      const Type *Ty = CI->getOperand(0)->getType();
      if (!isTypeLegal(Ty, VT))
        return false;

      bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
      if (isFloat && !Subtarget->hasVFP2())
        return false;

      unsigned CmpOpc;
      unsigned CondReg;
      switch (VT.SimpleTy) {
        default: return false;
        // TODO: Verify compares.
        case MVT::f32:
          CmpOpc = ARM::VCMPES;
          CondReg = ARM::FPSCR;
          break;
        case MVT::f64:
          CmpOpc = ARM::VCMPED;
          CondReg = ARM::FPSCR;
          break;
        case MVT::i32:
          CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
          CondReg = ARM::CPSR;
          break;
      }

      // Get the compare predicate.
      ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      unsigned Arg1 = getRegForValue(CI->getOperand(0));
      if (Arg1 == 0) return false;

      unsigned Arg2 = getRegForValue(CI->getOperand(1));
      if (Arg2 == 0) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(CmpOpc))
                      .addReg(Arg1).addReg(Arg2));

      // For floating point we need to move the result to a comparison register
      // that we can then use for branches.
      if (isFloat)
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::FMSTAT)));

      unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // Re-set the flags just in case.
  unsigned CmpOpc = isThumb ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CmpReg).addImm(0));

  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  MVT VT;
  const Type *Ty = CI->getOperand(0)->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned CmpOpc;
  unsigned CondReg;
  switch (VT.SimpleTy) {
  default: return false;
  // TODO: Verify compares.
  case MVT::f32:
    CmpOpc = ARM::VCMPES;
    CondReg = ARM::FPSCR;
    break;
  case MVT::f64:
    CmpOpc = ARM::VCMPED;
    CondReg = ARM::FPSCR;
    break;
  case MVT::i32:
    CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
    CondReg = ARM::CPSR;
    break;
  }

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  unsigned Arg1 = getRegForValue(CI->getOperand(0));
  if (Arg1 == 0) return false;

  unsigned Arg2 = getRegForValue(CI->getOperand(1));
  if (Arg2 == 0) return false;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(Arg1).addReg(Arg2));

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (isFloat)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass
                                    : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero
    = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
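  // (This is the mirror of SelectFPExt above: VCVTSD converts the double in
  // a DPR down to a single in an SPR.)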
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  // The conversion routine works on fp-reg to fp-reg, and the operand above
  // was an integer; move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, Op);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  const Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  const Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;

  // f64->s32 or f32->s32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  unsigned CmpOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(1));
  unsigned ResultReg = createResultReg(RC);
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCr : ARM::MOVCCr;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op1Reg).addReg(Op2Reg)
    .addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectSDiv(const Instruction *I) {
  MVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  MVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  const Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
  default: return false;
  case ISD::FADD:
    Opc = is64bit ? ARM::VADDD : ARM::VADDS;
    break;
  case ISD::FSUB:
    Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
    break;
  case ISD::FMUL:
    Opc = is64bit ? ARM::VMULD : ARM::VMULS;
    break;
  }
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
                                 EVT SrcVT, unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                           Src, /*TODO: Kill=*/false);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON parameters yet.
    if (VA.getLocVT().isVector() && VA.getLocVT().getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
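    // e.g. an i8 argument assigned to r0 with SExt promotion must be
    // sign-extended to i32 before the copy into the physical register.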
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        bool Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                      Arg, ArgVT, Arg);
        assert(Emitted && "Failed to emit a sext!"); Emitted=Emitted;
        Emitted = true;
        ArgVT = VA.getLocVT();
        break;
      }
      case CCValAssign::ZExt: {
        bool Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                      Arg, ArgVT, Arg);
        assert(Emitted && "Failed to emit a zext!"); Emitted=Emitted;
        Emitted = true;
        ArgVT = VA.getLocVT();
        break;
      }
      case CCValAssign::AExt: {
        bool Emitted = FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
                                      Arg, ArgVT, Arg);
        if (!Emitted)
          Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                   Arg, ArgVT, Arg);
        if (!Emitted)
          Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                   Arg, ArgVT, Arg);

        assert(Emitted && "Failed to emit an aext!"); Emitted=Emitted;
        ArgVT = VA.getLocVT();
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BIT_CONVERT, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      unsigned Base = ARM::SP;
      int Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Base, Offset)) return false;
    }
  }
  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
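      // (With a soft-float return convention an f64 comes back split across
      // r0 and r1; VMOVDRR glues the halves back into a D register.)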
      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;
    // TODO: For now, don't try to handle cases where getLocInfo()
    // says Full but the types don't match.
    if (TLI.getValueType(RV->getType()) != VA.getValVT())
      return false;

    // Make the copy.
    unsigned SrcReg = Reg + VA.getValNo();
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

// A quick function that will emit a call for a named libcall in F with the
// vector of passed arguments for the Instruction in I. We can assume that we
// can emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  const Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // For now we're using BLX etc on the assumption that we have v5t ops.
  if (!Subtarget->hasV5TOps()) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    const Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call: BLr9/tBLXi_r9 on Darwin, BL/tBLXi otherwise. Uses V5 ops.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc;
  if (isThumb)
    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
  else
    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
        .addExternalSymbol(TLI.getLibcallName(Call));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm or worry about intrinsics yet.
  if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;

  // Only handle global variable Callees that are direct calls.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel()))
    return false;

  // Check the calling convention.
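  // (e.g. on an AAPCS hard-float target CCAssignFnForCall will return
  // CC_ARM_AAPCS_VFP, which assigns fp arguments to s/d registers.)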
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  const Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // For now we're using BLX etc on the assumption that we have v5t ops.
  // TODO: Maybe?
  if (!Subtarget->hasV5TOps()) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);

    if (Arg == 0)
      return false;
    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    const Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT))
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call: BLr9/tBLXi_r9 on Darwin, BL/tBLXi otherwise. Uses V5 ops.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc;
  if (isThumb)
    CallOpc = Subtarget->isTargetDarwin() ? ARM::tBLXi_r9 : ARM::tBLXi;
  else
    CallOpc = Subtarget->isTargetDarwin() ? ARM::BLr9 : ARM::BL;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
        .addGlobalAddress(GV, 0, 0);

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

// TODO: SoftFP support.
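// TargetSelectInstruction is FastISel's entry point into this file: it is
// handed each IR instruction in turn, and returning false punts the
// instruction back to the SelectionDAG path.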
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {

  switch (I->getOpcode()) {
  case Instruction::Load:
    return SelectLoad(I);
  case Instruction::Store:
    return SelectStore(I);
  case Instruction::Br:
    return SelectBranch(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return SelectCmp(I);
  case Instruction::FPExt:
    return SelectFPExt(I);
  case Instruction::FPTrunc:
    return SelectFPTrunc(I);
  case Instruction::SIToFP:
    return SelectSIToFP(I);
  case Instruction::FPToSI:
    return SelectFPToSI(I);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::FSub:
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectSDiv(I);
  case Instruction::SRem:
    return SelectSRem(I);
  case Instruction::Call:
    return SelectCall(I);
  case Instruction::Select:
    return SelectSelect(I);
  case Instruction::Ret:
    return SelectRet(I);
  default: break;
  }
  return false;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-darwin.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // Darwin only, and no Thumb1, for now.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}