ARMFastISel.cpp revision 0eff39f2e25e9d8dd52b1eb7fa4e7cc6cc77875f
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
      : BaseType(RegBase), Offset(0) {
      Base.Reg = 0;
    }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
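    // The suffix on each FastEmitInst_* overload names its operand
    // signature: 'r' is a register operand, 'i' an integer immediate, and
    // 'f' a floating-point immediate, so e.g. FastEmitInst_rri takes two
    // registers plus an immediate. These ARM overrides exist so that every
    // emitted instruction is routed through AddOptionalDefs to receive its
    // predicate and optional CC operands.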
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      unsigned Op2, bool Op2IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm1, uint64_t Imm2);

    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);

#include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectSIToFP(const Instruction *I);
    bool SelectFPToSI(const Instruction *I);
    bool SelectSDiv(const Instruction *I);
    bool SelectSRem(const Instruction *I);
    bool SelectCall(const Instruction *I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT);
    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(const GlobalValue *GV);

    // Call handling routines.
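    // Calls are lowered in three steps: ProcessCallArgs assigns locations
    // with the calling convention and moves each argument into its register
    // or stack slot, the caller emits the branch-and-link itself, and
    // FinishCall tears down the call frame and copies results out of the
    // return registers.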
  private:
    bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                        unsigned &ResultReg);
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If this is a Thumb2 function, or the instruction is not NEON, it was
  // already handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
      AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, we know
  // we're not predicable but add it anyway.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
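  // For example, a plain ARM add's optional cc_out operand is filled with
  // reg0 via AddDefaultCC below, while an instruction whose optional def is
  // CPSR (the Thumb1 s_cc_out case mentioned above) gets AddDefaultT1CC.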
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return false;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    EVT SrcVT = MVT::i32;
    unsigned Opc = isThumb ? ARM::t2MOVi16 : ARM::MOVi16;
    unsigned ImmReg = createResultReg(TLI.getRegClassFor(SrcVT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // For now 32-bit only.
  if (VT != MVT::i32)
    return false;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: Need more magic for ARM PIC.
  if (!isThumb && (RelocM == Reloc::PIC_)) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(GV->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(GV->getType());
  }

  // Grab index.
  unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
  unsigned Id = AFI->createPICLabelUId();
  ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                              ARMCP::CPValue,
                                                              PCAdj);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  // Load value.
  MachineInstrBuilder MIB;
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb) {
    unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx);
    if (RelocM == Reloc::PIC_)
      MIB.addImm(Id);
  } else {
    // The extra immediate is for addrmode2.
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                  DestReg)
          .addConstantPoolIndex(Idx)
          .addImm(0);
  }
  AddOptionalDefs(MIB);

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return false;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
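  // The ADDri emitted below takes the raw frame index as its source operand;
  // frame index elimination later rewrites it into a concrete base register
  // plus offset.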
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast: {
    // Look through bitcasts.
    return ARMComputeAddress(U->getOperand(0), Addr);
  }
  case Instruction::IntToPtr: {
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::PtrToInt: {
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::GetElementPtr: {
    Address SavedAddr = Addr;
    int TmpOffset = Addr.Offset;

    // Iterate through the GEP folding the constants into offsets where
    // we can.
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      const Value *Op = *i;
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
        for (;;) {
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
            break;
          }
          if (isa<AddOperator>(Op) &&
              (!isa<Instruction>(Op) ||
               FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
               == FuncInfo.MBB) &&
              isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
            // An add (in the same block) with a constant operand. Fold the
            // constant.
            ConstantInt *CI =
              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Iterate on the other operand.
            Op = cast<AddOperator>(Op)->getOperand(0);
            continue;
          }
          // Unsupported
          goto unsupported_gep;
        }
      }
    }

    // Try to grab the base operand now.
    Addr.Offset = TmpOffset;
    if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

    // We failed, restore everything and try the other options.
    Addr = SavedAddr;

  unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.BaseType = Address::FrameIndexBase;
      Addr.Base.FI = SI->second;
      return true;
    }
    break;
  }
  }

  // Materialize the global variable's address into a reg which can
  // then be used later to load the variable.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
    if (Tmp == 0) return false;

    Addr.Base.Reg = Tmp;
    return true;
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    assert(false && "Unhandled load/store type!");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // Integer loads/stores handle 12-bit offsets.
    needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
    break;
  case MVT::f32:
  case MVT::f64:
    // Floating point operands handle 8-bit offsets.
    needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
    break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
                                        ARM::GPRRegisterClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
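  // For example, an i32 access at offset 4096 fails the 12-bit check above,
  // so base and offset are folded into a new register with an ADD and the
  // memory access then uses an offset of 0.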
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        Flags,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i16:
    Opc = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i8:
    Opc = isThumb ? ARM::t2LDRBi12 : ARM::LDRBi12;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i32:
    Opc = isThumb ? ARM::t2LDRi12 : ARM::LDRi12;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::f32:
    Opc = ARM::VLDRS;
    RC = TLI.getRegClassFor(VT);
    break;
  case MVT::f64:
    Opc = ARM::VLDRD;
    RC = TLI.getRegClassFor(VT);
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad);
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr)) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
  unsigned StrOpc;
  switch (VT.getSimpleVT().SimpleTy) {
  // This is mostly going to be Neon/vector support.
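  // Note that an i1 store first masks the source down to bit 0 below and
  // then falls through to the i8 store opcode.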
  default: return false;
  case MVT::i1: {
    unsigned Res = createResultReg(isThumb ? ARM::tGPRRegisterClass :
                                             ARM::GPRRegisterClass);
    unsigned Opc = isThumb ? ARM::t2ANDri : ARM::ANDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), Res)
                    .addReg(SrcReg).addImm(1));
    SrcReg = Res;
  } // Fallthrough here.
  case MVT::i8:
    StrOpc = isThumb ? ARM::t2STRBi12 : ARM::STRBi12;
    break;
  case MVT::i16:
    StrOpc = isThumb ? ARM::t2STRHi12 : ARM::STRH;
    break;
  case MVT::i32:
    StrOpc = isThumb ? ARM::t2STRi12 : ARM::STRi12;
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    StrOpc = ARM::VSTRS;
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    StrOpc = ARM::VSTRD;
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg, getKillRegState(true));
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr)) return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
  // Needs two compares...
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UEQ:
  default:
    // AL is our "false" for now. The other two need more compares.
    return ARMCC::AL;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    return ARMCC::EQ;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    return ARMCC::GT;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    return ARMCC::GE;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    return ARMCC::HI;
  case CmpInst::FCMP_OLT:
    return ARMCC::MI;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    return ARMCC::LS;
  case CmpInst::FCMP_ORD:
    return ARMCC::VC;
  case CmpInst::FCMP_UNO:
    return ARMCC::VS;
  case CmpInst::FCMP_UGE:
    return ARMCC::PL;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    return ARMCC::LT;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    return ARMCC::LE;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    return ARMCC::NE;
  case CmpInst::ICMP_UGE:
    return ARMCC::HS;
  case CmpInst::ICMP_ULT:
    return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.
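  // Three fast paths are handled below: a compare in the same block feeding
  // the branch, a trunc-to-i1 in the same block feeding it, and a constant
  // condition, which becomes an unconditional branch. Anything else falls
  // back to testing bit 0 of the materialized condition value.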
  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcVT = TLI.getValueType(Ty, true);
  if (!SrcVT.isSimple()) return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned CmpOpc;
  bool needsExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return false;
  // TODO: Verify compares.
  case MVT::f32:
    CmpOpc = ARM::VCMPES;
    break;
  case MVT::f64:
    CmpOpc = ARM::VCMPED;
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    needsExt = true;
    // Intentional fall-through.
  case MVT::i32:
    CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
    break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = getRegForValue(Src2Value);
  if (SrcReg2 == 0) return false;

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    unsigned ResultReg;
    EVT DestVT = MVT::i32;
    ResultReg = ARMEmitIntExt(SrcVT, SrcReg1, DestVT, isZExt);
    if (ResultReg == 0) return false;
    SrcReg1 = ResultReg;
    ResultReg = ARMEmitIntExt(SrcVT, SrcReg2, DestVT, isZExt);
    if (ResultReg == 0) return false;
    SrcReg2 = ResultReg;
  }

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(SrcReg1).addReg(SrcReg2));

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);
  Type *Ty = CI->getOperand(0)->getType();

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass
                                    : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  unsigned CondReg = isFloat ? ARM::FPSCR : ARM::CPSR;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    EVT DestVT = MVT::i32;
    unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, /*isZExt*/ false);
    if (ResultReg == 0) return false;
    SrcReg = ResultReg;
  }

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;

  // f64->s32 or f32->s32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  unsigned CmpOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(1));
  unsigned ResultReg = createResultReg(RC);
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCr : ARM::MOVCCr;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op1Reg).addReg(Op2Reg)
    .addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectSDiv(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
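  // Only the floating-point add, subtract, and multiply are matched here;
  // anything else returns false so the caller can fall back to the
  // SelectionDAG path.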
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
  default: return false;
  case ISD::FADD:
    Opc = is64bit ? ARM::VADDD : ARM::VADDS;
    break;
  case ISD::FSUB:
    Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
    break;
  case ISD::FMUL:
    Opc = is64bit ? ARM::VMULD : ARM::VMULS;
    break;
  }
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
                                 EVT SrcVT, unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                           Src, /*TODO: Kill=*/false);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
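  // For each assigned location: first promote the value as the convention
  // requires (sext, zext, or bitcast), then either copy it into its physical
  // register, split an f64 into a GPR pair with VMOVRRD, or store it to its
  // stack slot relative to SP.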
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt: {
      EVT DestVT = VA.getLocVT();
      unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
                                         /*isZExt*/false);
      assert(ResultReg != 0 && "Failed to emit a sext");
      Arg = ResultReg;
      break;
    }
    case CCValAssign::AExt:
      // Intentional fall-through.  Handle AExt and ZExt.
    case CCValAssign::ZExt: {
      EVT DestVT = VA.getLocVT();
      unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
                                         /*isZExt*/true);
      assert(ResultReg != 0 && "Failed to emit a zext");
      Arg = ResultReg;
      break;
    }
    case CCValAssign::BCvt: {
      unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                               /*TODO: Kill=*/false);
      assert(BC != 0 && "Failed to emit a bitcast!");
      Arg = BC;
      ArgVT = VA.getLocVT();
      break;
    }
    default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
    }
  }
  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
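      // VMOVDRR takes the two GPR halves returned in the location registers
      // and reassembles the f64 value in a single D register.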
      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
                   I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVVT = TLI.getValueType(RV->getType());
    EVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      bool isZExt = Outs[0].Flags.isZExt();
      unsigned ResultReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, isZExt);
      if (ResultReg == 0) return false;
      SrcReg = ResultReg;
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
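    // Without this, the copy into the physical return register above could
    // be treated as dead and deleted, since nothing else in the function
    // reads that register after the return.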
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {
  // Darwin needs the r9 versions of the opcodes.
  bool isDarwin = Subtarget->isTargetDarwin();
  if (isThumb) {
    return isDarwin ? ARM::tBLr9 : ARM::tBL;
  } else {
    return isDarwin ? ARM::BLr9 : ARM::BL;
  }
}

// A quick function that emits a call to the named libcall, passing the
// operands of the Instruction I as arguments. We can assume that we can
// emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call: BLr9 for Darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(NULL);
  if (isThumb)
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addExternalSymbol(TLI.getLibcallName(Call));
  else
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
          .addExternalSymbol(TLI.getLibcallName(Call)));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
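  // FinishCall pops the call frame and copies the result out of the
  // physical return registers; whatever it records in UsedRegs stays live,
  // and every other register defined by the call is marked dead below.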
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm or worry about intrinsics yet.
  if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;

  // Only handle global variable Callees.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV)
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call: BLr9 for Darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(GV);
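  // Note the asymmetry below (mirroring ARMEmitLibcall): for the Thumb call
  // the predicate operands are added before the callee, while for the ARM
  // call the callee is added first and the predicate appended afterwards,
  // as the differing builder order suggests the operand layouts require.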
  if (isThumb)
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addGlobalAddress(GV, 0, 0);
  else
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
          .addGlobalAddress(GV, 0, 0));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}

unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);  // AND immediate: mask with 1 to zero-extend an i1.
  else
    MIB.addImm(0);  // Rotate amount for the SXT/UXT instructions.
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  // FIXME: We could save an instruction in many cases by special-casing
  // load instructions.
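  // Illustrative mappings only (exact opcodes depend on the subtarget and
  // Thumb mode; see the table in ARMEmitIntExt above):
  //   %e = zext i8 %x to i32   ->  uxtb rD, rS      (ARMv6+)
  //   %e = sext i16 %x to i32  ->  sxth rD, rS      (ARMv6+)
  //   %e = zext i1 %x to i32   ->  and  rD, rS, #1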
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectSIToFP(I);
    case Instruction::FPToSI:
      return SelectFPToSI(I);
    case Instruction::FAdd:
      return SelectBinaryOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectSDiv(I);
    case Instruction::SRem:
      return SelectSRem(I);
    case Instruction::Call:
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    default: break;
  }
  return false;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-Darwin.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // Darwin only for now; Thumb1 mode is not supported.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}