ARMFastISel.cpp revision 52f6c03a450aeed93b97cad9e5373029ebcad5e7
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;
    unsigned Scale;
    unsigned PlusReg;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0), Scale(0), PlusReg(0) {
      Base.Reg = 0;
    }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      unsigned Op2, bool Op2IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm1, uint64_t Imm2);

    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectSIToFP(const Instruction *I);
    bool SelectFPToSI(const Instruction *I);
    bool SelectSDiv(const Instruction *I);
    bool SelectSRem(const Instruction *I);
    bool SelectCall(const Instruction *I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(const Type *Ty, MVT &VT);
    bool isLoadTypeLegal(const Type *Ty, MVT &VT);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(const GlobalValue *GV);

    // Call handling routines.
  private:
    bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                        unsigned &ResultReg);
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const TargetInstrDesc &TID = MI->getDesc();

  // If we're a Thumb2 function or not a NEON instruction, we were handled
  // via isPredicable.
  if ((TID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i)
    if (TID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for a description of why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyway.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  // VMOVSR moves a core register into an S register.
  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  // VMOVRS moves an S register into a core register.
  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    unsigned Opc = is64bit ? ARM::FCONSTD : ARM::FCONSTS;
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addFPImm(CFP));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getSExtValue())) {
    unsigned Opc = isThumb ? ARM::t2MOVi16 : ARM::MOVi16;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), DestReg)
                    .addImm(CI->getSExtValue()));
    return DestReg;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: No external globals for now.
  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) return 0;

  // TODO: Need more magic for ARM PIC.
  if (!isThumb && (RelocM == Reloc::PIC_)) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(GV->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(GV->getType());
  }

  // Grab index.
  unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
  unsigned Id = AFI->createPICLabelUId();
  ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, Id,
                                                       ARMCP::CPValue, PCAdj);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  // Load value.
  MachineInstrBuilder MIB;
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb) {
    unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx);
    if (RelocM == Reloc::PIC_)
      MIB.addImm(Id);
  } else {
    // The extra immediate is for addrmode2.
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                  DestReg)
          .addConstantPoolIndex(Idx)
          .addImm(0);
  }
  AddOptionalDefs(MIB);
  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(const Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(const Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast: {
    // Look through bitcasts.
    return ARMComputeAddress(U->getOperand(0), Addr);
  }
  case Instruction::IntToPtr: {
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::PtrToInt: {
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::GetElementPtr: {
    Address SavedAddr = Addr;
    int TmpOffset = Addr.Offset;

    // Iterate through the GEP folding the constants into offsets where
    // we can.
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      const Value *Op = *i;
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
        for (;;) {
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
            break;
          }
          if (isa<AddOperator>(Op) &&
              (!isa<Instruction>(Op) ||
               FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
               == FuncInfo.MBB) &&
              isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
            // An add (in the same block) with a constant operand. Fold the
            // constant.
            ConstantInt *CI =
              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Iterate on the other operand.
            Op = cast<AddOperator>(Op)->getOperand(0);
            continue;
          }
          // Unsupported
          goto unsupported_gep;
        }
      }
    }

    // Try to grab the base operand now.
    Addr.Offset = TmpOffset;
    if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

    // We failed, restore everything and try the other options.
    Addr = SavedAddr;

  unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.BaseType = Address::FrameIndexBase;
      Addr.Base.FI = SI->second;
      return true;
    }
    break;
  }
  }

  // Materialize the global variable's address into a reg which can
  // then be used later to load the variable.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
    if (Tmp == 0) return false;

    Addr.Base.Reg = Tmp;
    return true;
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    assert(false && "Unhandled load/store type!");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // Integer loads/stores handle 12-bit offsets.
    needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
    break;
  case MVT::f32:
  case MVT::f64:
    // Floating point operands handle 8-bit offsets.
    needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
    break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
                                        ARM::GPRRegisterClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
                                  MachinePointerInfo::getFixedStack(FI, Offset),
                                  MachineMemOperand::MOLoad,
                                  MFI.getObjectSize(FI),
                                  MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i16:
    Opc = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i8:
    Opc = isThumb ? ARM::t2LDRBi12 : ARM::LDRBi12;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i32:
    Opc = isThumb ? ARM::t2LDRi12 : ARM::LDRi12;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::f32:
    Opc = ARM::VLDRS;
    RC = TLI.getRegClassFor(VT);
    break;
  case MVT::f64:
    Opc = ARM::VLDRD;
    RC = TLI.getRegClassFor(VT);
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB);
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr)) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
  unsigned StrOpc;
  switch (VT.getSimpleVT().SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i1: {
    unsigned Res = createResultReg(isThumb ? ARM::tGPRRegisterClass :
                                             ARM::GPRRegisterClass);
    unsigned Opc = isThumb ? ARM::t2ANDri : ARM::ANDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), Res)
                    .addReg(SrcReg).addImm(1));
    SrcReg = Res;
  } // Fallthrough here.
  case MVT::i8:
    StrOpc = isThumb ? ARM::t2STRBi12 : ARM::STRBi12;
    break;
  case MVT::i16:
    StrOpc = isThumb ? ARM::t2STRHi12 : ARM::STRH;
    break;
  case MVT::i32:
    StrOpc = isThumb ? ARM::t2STRi12 : ARM::STRi12;
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    StrOpc = ARM::VSTRS;
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    StrOpc = ARM::VSTRD;
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg, getKillRegState(true));
  AddLoadStoreOperands(VT, Addr, MIB);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr)) return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
  // Needs two compares...
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UEQ:
  default:
    // AL is our "false" for now. The other two need more compares.
    return ARMCC::AL;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    return ARMCC::EQ;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    return ARMCC::GT;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    return ARMCC::GE;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    return ARMCC::HI;
  case CmpInst::FCMP_OLT:
    return ARMCC::MI;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    return ARMCC::LS;
  case CmpInst::FCMP_ORD:
    return ARMCC::VC;
  case CmpInst::FCMP_UNO:
    return ARMCC::VS;
  case CmpInst::FCMP_UGE:
    return ARMCC::PL;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    return ARMCC::LT;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    return ARMCC::LE;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    return ARMCC::NE;
  case CmpInst::ICMP_UGE:
    return ARMCC::HS;
  case CmpInst::ICMP_ULT:
    return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  // TODO: Factor this out.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    MVT SourceVT;
    const Type *Ty = CI->getOperand(0)->getType();
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())
        && isTypeLegal(Ty, SourceVT)) {
      bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
      if (isFloat && !Subtarget->hasVFP2())
        return false;

      unsigned CmpOpc;
      switch (SourceVT.SimpleTy) {
      default: return false;
      // TODO: Verify compares.
      case MVT::f32:
        CmpOpc = ARM::VCMPES;
        break;
      case MVT::f64:
        CmpOpc = ARM::VCMPED;
        break;
      case MVT::i32:
        CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
        break;
      }

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      unsigned Arg1 = getRegForValue(CI->getOperand(0));
      if (Arg1 == 0) return false;

      unsigned Arg2 = getRegForValue(CI->getOperand(1));
      if (Arg2 == 0) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(CmpOpc))
                      .addReg(Arg1).addReg(Arg2));

      // For floating point we need to move the result to a comparison register
      // that we can then use for branches.
      if (isFloat)
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::FMSTAT)));

      unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  MVT VT;
  const Type *Ty = CI->getOperand(0)->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned CmpOpc;
  unsigned CondReg;
  switch (VT.SimpleTy) {
  default: return false;
  // TODO: Verify compares.
  case MVT::f32:
    CmpOpc = ARM::VCMPES;
    CondReg = ARM::FPSCR;
    break;
  case MVT::f64:
    CmpOpc = ARM::VCMPED;
    CondReg = ARM::FPSCR;
    break;
  case MVT::i32:
    CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
    CondReg = ARM::CPSR;
    break;
  }

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  unsigned Arg1 = getRegForValue(CI->getOperand(0));
  if (Arg1 == 0) return false;

  unsigned Arg2 = getRegForValue(CI->getOperand(1));
  if (Arg2 == 0) return false;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(Arg1).addReg(Arg2));

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (isFloat)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass
                                    : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero
    = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, Op);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  const Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  const Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;

  // f64->s32 or f32->s32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  unsigned CmpOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(1));
  unsigned ResultReg = createResultReg(RC);
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCr : ARM::MOVCCr;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op1Reg).addReg(Op2Reg)
    .addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectSDiv(const Instruction *I) {
  MVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  MVT VT;
  const Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  const Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
  default: return false;
  case ISD::FADD:
    Opc = is64bit ? ARM::VADDD : ARM::VADDS;
    break;
  case ISD::FSUB:
    Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
    break;
  case ISD::FMUL:
    Opc = is64bit ? ARM::VMULD : ARM::VMULS;
    break;
  }
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
                                 EVT SrcVT, unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                           Src, /*TODO: Kill=*/false);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt: {
      bool Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
      Emitted = true;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::ZExt: {
      bool Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
      Emitted = true;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::AExt: {
      bool Emitted = FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                 Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                 Arg, ArgVT, Arg);

      assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::BCvt: {
      unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                               /*TODO: Kill=*/false);
      assert(BC != 0 && "Failed to emit a bitcast!");
      Arg = BC;
      ArgVT = VA.getLocVT();
      break;
    }
    default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
    }
  }
  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;
    // TODO: For now, don't try to handle cases where getLocInfo()
    // says Full but the types don't match.
    if (TLI.getValueType(RV->getType()) != VA.getValVT())
      return false;

    // Make the copy.
    unsigned SrcReg = Reg + VA.getValNo();
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {

  // Darwin needs the r9 versions of the opcodes.
  bool isDarwin = Subtarget->isTargetDarwin();
  if (isThumb) {
    return isDarwin ? ARM::tBLr9 : ARM::tBL;
  } else {
    return isDarwin ? ARM::BLr9 : ARM::BL;

// A quick helper that emits a call to the named libcall, passing the
// operands of the Instruction I as arguments. We can assume that we can
// emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  const Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    const Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call: BLr9 for Darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(NULL);
  if (isThumb)
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addExternalSymbol(TLI.getLibcallName(Call));
  else
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
                         .addExternalSymbol(TLI.getLibcallName(Call)));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
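
// Illustrative use of the helper above (a sketch; the exact symbol comes
// from the target's libcall table): SelectSRem routes a 32-bit 'srem'
// through here with RTLIB::SREM_I32, which emits roughly
//   bl __modsi3
// (or an __aeabi_* equivalent on EABI targets) instead of selecting an
// instruction.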

bool ARMFastISel::SelectCall(const Instruction *I) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm or worry about intrinsics yet.
  if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;

  // Only handle global variable Callees.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV)
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  const Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    const Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT))
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call: BLr9 for Darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(GV);
  if (isThumb)
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addGlobalAddress(GV, 0, 0);
  else
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
                         .addGlobalAddress(GV, 0, 0));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
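
// Editorial examples of what SelectCall above handles vs. rejects:
//   call i32 @callee(i32 %x)   -- handled: direct callee with legal types
//   call i32 %fnptr(i32 %x)    -- rejected: callee is not a GlobalValue
//   any IntrinsicInst          -- rejected: intrinsics not handled yet
//   sret/byval/inreg/nest args -- rejected: only "easy" calls for now
// Each rejection returns false so SelectionDAG can lower the call instead.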

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectSIToFP(I);
    case Instruction::FPToSI:
      return SelectFPToSI(I);
    case Instruction::FAdd:
      return SelectBinaryOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectSDiv(I);
    case Instruction::SRem:
      return SelectSRem(I);
    case Instruction::Call:
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    default: break;
  }
  return false;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-Darwin.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // For now, require Darwin and a non-Thumb1 subtarget.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}
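
// Usage sketch (hypothetical caller shape, for illustration only): the
// SelectionDAG instruction selector asks the target for a FastISel instance
// and falls back to full DAG selection when none is returned, roughly:
//   if (FastISel *FIS = TLI.createFastISel(FuncInfo)) {  // via the TLI hook
//     // try FIS->TargetSelectInstruction(I) for each instruction...
//   }
// A null result here (non-Darwin, Thumb1-only, or -disable-arm-fast-isel)
// therefore disables fast-isel for the whole function.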