ARMFastISel.cpp revision 6d64b3adab682aea9c0b4dd665acc5e863ac6d21
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                    cl::desc("Turn off experimental ARM fast-isel support"),
                    cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
       Base.Reg = 0;
     }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
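    // These overrides shadow the generic FastISel emitters of the same name;
    // each one funnels the instruction it builds through AddOptionalDefs so
    // the predicate and optional CC operands ARM instructions expect are
    // always present.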
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      unsigned Op2, bool Op2IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm1, uint64_t Imm2);

    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);

#include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectSIToFP(const Instruction *I);
    bool SelectFPToSI(const Instruction *I);
    bool SelectSDiv(const Instruction *I);
    bool SelectSRem(const Instruction *I);
    bool SelectCall(const Instruction *I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectIntCast(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(const GlobalValue *GV);

    // Call handling routines.
  private:
    bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                        unsigned &ResultReg);
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If this is a Thumb2 function or the instruction isn't in the NEON domain,
  // it was handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
      AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyway.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate? CPSR is set iff the optional def is
  // the CPSR register; all other OptionalDefs in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  // For now 32-bit only.
  if (VT != MVT::i32) return false;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getSExtValue())) {
    unsigned Opc = isThumb ? ARM::t2MOVi16 : ARM::MOVi16;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), DestReg)
                    .addImm(CI->getSExtValue()));
    return DestReg;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: Need more magic for ARM PIC.
  if (!isThumb && (RelocM == Reloc::PIC_)) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(GV->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(GV->getType());
  }

  // Grab index.
  unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
  unsigned Id = AFI->createPICLabelUId();
  ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                              ARMCP::CPValue,
                                                              PCAdj);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  // Load value.
  MachineInstrBuilder MIB;
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb) {
    unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx);
    if (RelocM == Reloc::PIC_)
      MIB.addImm(Id);
  } else {
    // The extra immediate is for addrmode2.
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                  DestReg)
          .addConstantPoolIndex(Idx)
          .addImm(0);
  }
  AddOptionalDefs(MIB);

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return false;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast: {
    // Look through bitcasts.
    return ARMComputeAddress(U->getOperand(0), Addr);
  }
  case Instruction::IntToPtr: {
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::PtrToInt: {
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::GetElementPtr: {
    Address SavedAddr = Addr;
    int TmpOffset = Addr.Offset;

    // Iterate through the GEP folding the constants into offsets where
    // we can.
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      const Value *Op = *i;
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
        for (;;) {
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
            break;
          }
          if (isa<AddOperator>(Op) &&
              (!isa<Instruction>(Op) ||
               FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
               == FuncInfo.MBB) &&
              isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
            // An add (in the same block) with a constant operand. Fold the
            // constant.
            ConstantInt *CI =
              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Iterate on the other operand.
            Op = cast<AddOperator>(Op)->getOperand(0);
            continue;
          }
          // Unsupported
          goto unsupported_gep;
        }
      }
    }

    // Try to grab the base operand now.
    Addr.Offset = TmpOffset;
    if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

    // We failed, restore everything and try the other options.
    Addr = SavedAddr;

    unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.BaseType = Address::FrameIndexBase;
      Addr.Base.FI = SI->second;
      return true;
    }
    break;
  }
  }

  // Materialize the global variable's address into a reg which can
  // then be used later to load the variable.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
    if (Tmp == 0) return false;

    Addr.Base.Reg = Tmp;
    return true;
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    assert(false && "Unhandled load/store type!");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // Integer loads/stores handle 12-bit offsets.
    needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
    break;
  case MVT::f32:
  case MVT::f64:
    // Floating point operands handle 8-bit offsets.
    needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
    break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
                              ARM::GPRRegisterClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        Flags,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i16:
    Opc = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i8:
    Opc = isThumb ? ARM::t2LDRBi12 : ARM::LDRBi12;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i32:
    Opc = isThumb ? ARM::t2LDRi12 : ARM::LDRi12;
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::f32:
    Opc = ARM::VLDRS;
    RC = TLI.getRegClassFor(VT);
    break;
  case MVT::f64:
    Opc = ARM::VLDRD;
    RC = TLI.getRegClassFor(VT);
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad);
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr)) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
  unsigned StrOpc;
  switch (VT.getSimpleVT().SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i1: {
    unsigned Res = createResultReg(isThumb ? ARM::tGPRRegisterClass :
                                   ARM::GPRRegisterClass);
    unsigned Opc = isThumb ? ARM::t2ANDri : ARM::ANDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), Res)
                    .addReg(SrcReg).addImm(1));
    SrcReg = Res;
  } // Fallthrough here.
  case MVT::i8:
    StrOpc = isThumb ? ARM::t2STRBi12 : ARM::STRBi12;
    break;
  case MVT::i16:
    StrOpc = isThumb ? ARM::t2STRHi12 : ARM::STRH;
    break;
  case MVT::i32:
    StrOpc = isThumb ? ARM::t2STRi12 : ARM::STRi12;
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    StrOpc = ARM::VSTRS;
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    StrOpc = ARM::VSTRD;
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg, getKillRegState(true));
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr)) return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
  // Needs two compares...
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UEQ:
  default:
    // AL is our "false" for now. The other two need more compares.
    return ARMCC::AL;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    return ARMCC::EQ;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    return ARMCC::GT;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    return ARMCC::GE;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    return ARMCC::HI;
  case CmpInst::FCMP_OLT:
    return ARMCC::MI;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    return ARMCC::LS;
  case CmpInst::FCMP_ORD:
    return ARMCC::VC;
  case CmpInst::FCMP_UNO:
    return ARMCC::VS;
  case CmpInst::FCMP_UGE:
    return ARMCC::PL;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    return ARMCC::LT;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    return ARMCC::LE;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    return ARMCC::NE;
  case CmpInst::ICMP_UGE:
    return ARMCC::HS;
  case CmpInst::ICMP_ULT:
    return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.
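  // Three cases are handled directly below: a compare in this block feeding
  // the branch, a trunc in this block feeding the branch, and a constant
  // condition. Anything else falls through to the generic re-test of the
  // materialized i1 condition value at the bottom of this function.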

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1)))
        return false;

      unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
      .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare! Our block was split, and
  // now our compare lives in a predecessor block. We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register. Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
  .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value) {
  MVT VT;
  Type *Ty = Src1Value->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned CmpOpc;
  switch (VT.SimpleTy) {
  // TODO: Add support for non-legal types (i.e., i1, i8, i16).
  default: return false;
  // TODO: Verify compares.
  case MVT::f32:
    CmpOpc = ARM::VCMPES;
    break;
  case MVT::f64:
    CmpOpc = ARM::VCMPED;
    break;
  case MVT::i32:
    CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
    break;
  }

  unsigned Src1 = getRegForValue(Src1Value);
  if (Src1 == 0) return false;

  unsigned Src2 = getRegForValue(Src2Value);
  if (Src2 == 0) return false;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(Src1).addReg(Src2));

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);
  Type *Ty = CI->getOperand(0)->getType();

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1)))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass
                                    : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  unsigned CondReg = isFloat ? ARM::FPSCR : ARM::CPSR;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
  .addReg(ZeroReg).addImm(1)
  .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  // FIXME: Handle sign-extension where necessary.
  if (!I->getOperand(0)->getType()->isIntegerTy(32))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, Op);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;

  // f64->s32 or f32->s32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  unsigned CmpOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(1));
  unsigned ResultReg = createResultReg(RC);
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCr : ARM::MOVCCr;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
  .addReg(Op1Reg).addReg(Op2Reg)
  .addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectSDiv(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
  default: return false;
  case ISD::FADD:
    Opc = is64bit ? ARM::VADDD : ARM::VADDS;
    break;
  case ISD::FSUB:
    Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
    break;
  case ISD::FMUL:
    Opc = is64bit ? ARM::VMULD : ARM::VMULS;
    break;
  }
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
                                 EVT SrcVT, unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                           Src, /*TODO: Kill=*/false);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
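    // Extend or bitcast the value to the type the calling convention assigned
    // to this argument slot; Full means the value is already the right type.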
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt: {
      bool Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
      Emitted = true;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::ZExt: {
      bool Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
      Emitted = true;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::AExt: {
      bool Emitted = FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                 Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                 Arg, ArgVT, Arg);

      assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::BCvt: {
      unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                               /*TODO: Kill=*/false);
      assert(BC != 0 && "Failed to emit a bitcast!");
      Arg = BC;
      ArgVT = VA.getLocVT();
      break;
    }
    default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
      .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
    }
  }
  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
1661 EVT DestVT = RVLocs[0].getValVT(); 1662 TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT); 1663 unsigned ResultReg = createResultReg(DstRC); 1664 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1665 TII.get(ARM::VMOVDRR), ResultReg) 1666 .addReg(RVLocs[0].getLocReg()) 1667 .addReg(RVLocs[1].getLocReg())); 1668 1669 UsedRegs.push_back(RVLocs[0].getLocReg()); 1670 UsedRegs.push_back(RVLocs[1].getLocReg()); 1671 1672 // Finally update the result. 1673 UpdateValueMap(I, ResultReg); 1674 } else { 1675 assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!"); 1676 EVT CopyVT = RVLocs[0].getValVT(); 1677 TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT); 1678 1679 unsigned ResultReg = createResultReg(DstRC); 1680 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 1681 ResultReg).addReg(RVLocs[0].getLocReg()); 1682 UsedRegs.push_back(RVLocs[0].getLocReg()); 1683 1684 // Finally update the result. 1685 UpdateValueMap(I, ResultReg); 1686 } 1687 } 1688 1689 return true; 1690} 1691 1692bool ARMFastISel::SelectRet(const Instruction *I) { 1693 const ReturnInst *Ret = cast<ReturnInst>(I); 1694 const Function &F = *I->getParent()->getParent(); 1695 1696 if (!FuncInfo.CanLowerReturn) 1697 return false; 1698 1699 if (F.isVarArg()) 1700 return false; 1701 1702 CallingConv::ID CC = F.getCallingConv(); 1703 if (Ret->getNumOperands() > 0) { 1704 SmallVector<ISD::OutputArg, 4> Outs; 1705 GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(), 1706 Outs, TLI); 1707 1708 // Analyze operands of the call, assigning locations to each operand. 1709 SmallVector<CCValAssign, 16> ValLocs; 1710 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,I->getContext()); 1711 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */)); 1712 1713 const Value *RV = Ret->getOperand(0); 1714 unsigned Reg = getRegForValue(RV); 1715 if (Reg == 0) 1716 return false; 1717 1718 // Only handle a single return value for now. 1719 if (ValLocs.size() != 1) 1720 return false; 1721 1722 CCValAssign &VA = ValLocs[0]; 1723 1724 // Don't bother handling odd stuff for now. 1725 // FIXME: Should be able to handle i1, i8, and/or i16 return types. 1726 if (VA.getLocInfo() != CCValAssign::Full) 1727 return false; 1728 // Only handle register returns for now. 1729 if (!VA.isRegLoc()) 1730 return false; 1731 // TODO: For now, don't try to handle cases where getLocInfo() 1732 // says Full but the types don't match. 1733 if (TLI.getValueType(RV->getType()) != VA.getValVT()) 1734 return false; 1735 1736 // Make the copy. 1737 unsigned SrcReg = Reg + VA.getValNo(); 1738 unsigned DstReg = VA.getLocReg(); 1739 const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg); 1740 // Avoid a cross-class copy. This is very unlikely. 1741 if (!SrcRC->contains(DstReg)) 1742 return false; 1743 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 1744 DstReg).addReg(SrcReg); 1745 1746 // Mark the register as live out of the function. 1747 MRI.addLiveOut(VA.getLocReg()); 1748 } 1749 1750 unsigned RetOpc = isThumb ? ARM::tBX_RET : ARM::BX_RET; 1751 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1752 TII.get(RetOpc))); 1753 return true; 1754} 1755 1756unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) { 1757 1758 // Darwin needs the r9 versions of the opcodes. 1759 bool isDarwin = Subtarget->isTargetDarwin(); 1760 if (isThumb) { 1761 return isDarwin ? ARM::tBLr9 : ARM::tBL; 1762 } else { 1763 return isDarwin ? 
unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {
  // Darwin needs the r9 versions of the opcodes.
  bool isDarwin = Subtarget->isTargetDarwin();
  if (isThumb)
    return isDarwin ? ARM::tBLr9 : ARM::tBL;
  return isDarwin ? ARM::BLr9 : ARM::BL;
}

// Emit a call to the named libcall, passing the operands of the Instruction I
// as arguments. We can assume that any libcall we can produce is one we can
// emit a call for, so this is an abridged version of the full call
// infrastructure: there is no need to worry about computed function pointers
// or unusual arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call: BLr9 for Darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(NULL); // No GlobalValue for a libcall.
  if (isThumb)
    // Explicitly add the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addExternalSymbol(TLI.getLibcallName(Call));
  else
    // Explicitly add the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
                         .addExternalSymbol(TLI.getLibcallName(Call)));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

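  // As elsewhere in fast-isel, returning false below does not report an
  // error; it simply punts this call back to the SelectionDAG path.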
  // Can't handle inline asm or worry about intrinsics yet.
  if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;

  // Only handle global variable Callees.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV)
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    // FIXME: Should be able to handle i1, i8, and/or i16 parameters.
    if (!isTypeLegal(ArgTy, ArgVT))
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call: BLr9 for Darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(GV);
  if (isThumb)
    // Explicitly add the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addGlobalAddress(GV, 0, 0);
  else
    // Explicitly add the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
                         .addGlobalAddress(GV, 0, 0));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
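  // FinishCall issues the CALLSEQ_END, copies the result out of its physical
  // register(s), and records those registers in UsedRegs so they are kept
  // alive when the remaining defs are marked dead below.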
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectIntCast(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers. The high bits for a type smaller than
  // the register size are assumed to be undefined.
  Type *DestTy = I->getType();
  Value *Op = I->getOperand(0);
  Type *SrcTy = Op->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  if (isa<TruncInst>(I)) {
    if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
      return false;
    if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
      return false;

    unsigned SrcReg = getRegForValue(Op);
    if (!SrcReg) return false;

    // Because the high bits are undefined, a truncate doesn't generate
    // any code.
    UpdateValueMap(I, SrcReg);
    return true;
  }
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return false;

  unsigned Opc;
  bool isZext = isa<ZExtInst>(I);
  bool isBoolZext = false;
  if (!SrcVT.isSimple())
    return false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
    default: return false;
    case MVT::i16:
      if (!Subtarget->hasV6Ops()) return false;
      if (isZext)
        Opc = isThumb ? ARM::t2UXTH : ARM::UXTH;
      else
        Opc = isThumb ? ARM::t2SXTH : ARM::SXTH;
      break;
    case MVT::i8:
      if (!Subtarget->hasV6Ops()) return false;
      if (isZext)
        Opc = isThumb ? ARM::t2UXTB : ARM::UXTB;
      else
        Opc = isThumb ? ARM::t2SXTB : ARM::SXTB;
      break;
    case MVT::i1:
      if (isZext) {
        // A boolean zero-extend is just an AND with 1.
        Opc = isThumb ? ARM::t2ANDri : ARM::ANDri;
        isBoolZext = true;
        break;
      }
      return false;
  }

  // FIXME: We could save an instruction in many cases by special-casing
  // load instructions.
  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1); // AND mask for the boolean zero-extend.
  else
    MIB.addImm(0); // Rotation amount for the sxt/uxt instructions.
  AddOptionalDefs(MIB);
  UpdateValueMap(I, DestReg);
  return true;
}

// TODO: SoftFP support.
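// TargetSelectInstruction is the fast-isel entry point for a single IR
// instruction: it dispatches to the Select* routines above, and any opcode
// that isn't handled (or is rejected by its handler) falls back to
// SelectionDAG.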
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectSIToFP(I);
    case Instruction::FPToSI:
      return SelectFPToSI(I);
    case Instruction::FAdd:
      return SelectBinaryOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectSDiv(I);
    case Instruction::SRem:
      return SelectSRem(I);
    case Instruction::Call:
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntCast(I);
    default: break;
  }
  return false;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-darwin.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // Darwin only for now, and not Thumb1.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}