ARMFastISel.cpp revision 33ad4ccb5f7df4ae166f3a6d89de75199a2376f9
//===-- ARMFastISel.cpp - ARM FastISel implementation --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
      : BaseType(RegBase), Offset(0) {
      Base.Reg = 0;
    }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb;
  LLVMContext *Context;

 public:
  explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
    Subtarget = &TM.getSubtarget<ARMSubtarget>();
    AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
    isThumb = AFI->isThumbFunction();
    Context = &funcInfo.Fn->getContext();
  }

  // Code from FastISel.cpp.
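  // These re-implement the generic FastISel instruction emitters so that
  // every machine instruction is routed through AddOptionalDefs, which
  // fills in the predicate and optional CC-def operands ARM requires.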
  virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC);
  virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill);
  virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill);
  virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    unsigned Op2, bool Op2IsKill);
  virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm);
  virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm);
  virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm);
  virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm);
  virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm1, uint64_t Imm2);

  virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx);

  // Backend specific FastISel code.
  virtual bool TargetSelectInstruction(const Instruction *I);
  virtual unsigned TargetMaterializeConstant(const Constant *C);
  virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);

#include "ARMGenFastISel.inc"

  // Instruction selection routines.
 private:
  bool SelectLoad(const Instruction *I);
  bool SelectStore(const Instruction *I);
  bool SelectBranch(const Instruction *I);
  bool SelectCmp(const Instruction *I);
  bool SelectFPExt(const Instruction *I);
  bool SelectFPTrunc(const Instruction *I);
  bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
  bool SelectSIToFP(const Instruction *I);
  bool SelectFPToSI(const Instruction *I);
  bool SelectSDiv(const Instruction *I);
  bool SelectSRem(const Instruction *I);
  bool SelectCall(const Instruction *I);
  bool SelectSelect(const Instruction *I);
  bool SelectRet(const Instruction *I);
  bool SelectTrunc(const Instruction *I);
  bool SelectIntExt(const Instruction *I);

  // Utility routines.
 private:
  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isLoadTypeLegal(Type *Ty, MVT &VT);
  bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                  bool isZExt);
  bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr);
  bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
  bool ARMComputeAddress(const Value *Obj, Address &Addr);
  void ARMSimplifyAddress(Address &Addr, EVT VT);
  unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
  unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
  unsigned ARMMaterializeInt(const Constant *C, EVT VT);
  unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
  unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
  unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
  unsigned ARMSelectCallOp(const GlobalValue *GV);

  // Call handling routines.
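  // These follow the usual call-lowering shape: pick a CC assignment
  // function, marshal the arguments into registers or stack slots, then
  // clean up the call frame and copy any results back out.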
 private:
  bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                      unsigned &ResultReg);
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
  bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                       SmallVectorImpl<unsigned> &ArgRegs,
                       SmallVectorImpl<MVT> &ArgVTs,
                       SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                       SmallVectorImpl<unsigned> &RegArgs,
                       CallingConv::ID CC,
                       unsigned &NumBytes);
  bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                  const Instruction *I, CallingConv::ID CC,
                  unsigned &NumBytes);
  bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

  // OptionalDef handling routines.
 private:
  bool isARMNEONPred(const MachineInstr *MI);
  bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
  const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
  void AddLoadStoreOperands(EVT VT, Address &Addr,
                            const MachineInstrBuilder &MIB,
                            unsigned Flags);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're a thumb2 or not NEON function we were handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine is predicable go ahead and add the predicate operands, if
// it needs default CC operands add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyways.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate? Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  // For now 32-bit only.
  if (VT != MVT::i32) return false;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getSExtValue())) {
    unsigned Opc = isThumb ? ARM::t2MOVi16 : ARM::MOVi16;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), DestReg)
                    .addImm(CI->getSExtValue()));
    return DestReg;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: Need more magic for ARM PIC.
  if (!isThumb && (RelocM == Reloc::PIC_)) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(GV->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(GV->getType());
  }

  // Grab index.
  unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
  unsigned Id = AFI->createPICLabelUId();
  ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                              ARMCP::CPValue,
                                                              PCAdj);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  // Load value.
  MachineInstrBuilder MIB;
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb) {
    unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx);
    if (RelocM == Reloc::PIC_)
      MIB.addImm(Id);
  } else {
    // The extra immediate is for addrmode2.
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                  DestReg)
          .addConstantPoolIndex(Idx)
          .addImm(0);
  }
  AddOptionalDefs(MIB);

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return false;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    break;
    case Instruction::BitCast: {
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    }
    case Instruction::IntToPtr: {
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::PtrToInt: {
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (isa<AddOperator>(Op) &&
                (!isa<Instruction>(Op) ||
                 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
                 == FuncInfo.MBB) &&
                isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add (in the same block) with a constant operand. Fold the
              // constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Materialize the global variable's address into a reg which can
  // then be used later to load the variable.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
    if (Tmp == 0) return false;

    Addr.Base.Reg = Tmp;
    return true;
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
    default:
      assert(false && "Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      // Integer loads/stores handle 12-bit offsets.
      needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
                                        ARM::GPRRegisterClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
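  // FastEmit_ri_ first tries an add-with-immediate and otherwise
  // materializes the offset into a register and emits a plain add.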
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        Flags,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores need an additional operand.
    if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);

    MIB.addImm(Addr.Offset);
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i16:
      Opc = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i8:
      Opc = isThumb ? ARM::t2LDRBi12 : ARM::LDRBi12;
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i32:
      Opc = isThumb ? ARM::t2LDRi12 : ARM::LDRi12;
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::f32:
      Opc = ARM::VLDRS;
      RC = TLI.getRegClassFor(VT);
      break;
    case MVT::f64:
      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad);
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr)) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
  unsigned StrOpc;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
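    // An i1 store first masks the value down to a single bit, then falls
    // through into the i8 case to emit a byte store.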
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb ? ARM::tGPRRegisterClass :
                                               ARM::GPRRegisterClass);
      unsigned Opc = isThumb ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      StrOpc = isThumb ? ARM::t2STRBi12 : ARM::STRBi12;
      break;
    case MVT::i16:
      StrOpc = isThumb ? ARM::t2STRHi12 : ARM::STRH;
      break;
    case MVT::i32:
      StrOpc = isThumb ? ARM::t2STRi12 : ARM::STRi12;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRS;
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg, getKillRegState(true));
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr)) return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.
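  // Three cases avoid re-materializing the condition: a compare in this
  // block, a trunc-to-i1 in this block, and a constant condition. Anything
  // else falls through to testing the materialized i1 value directly.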

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare! Our block was split, and
  // now our compare lives in a predecessor block. We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register. Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcVT = TLI.getValueType(Ty, true);
  if (!SrcVT.isSimple()) return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned CmpOpc;
  bool needsExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      CmpOpc = ARM::VCMPES;
      break;
    case MVT::f64:
      CmpOpc = ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
    // Intentional fall-through.
    case MVT::i32:
      CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = getRegForValue(Src2Value);
  if (SrcReg2 == 0) return false;

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    unsigned ResultReg;
    EVT DestVT = MVT::i32;
    ResultReg = ARMEmitIntExt(SrcVT, SrcReg1, DestVT, isZExt);
    if (ResultReg == 0) return false;
    SrcReg1 = ResultReg;
    ResultReg = ARMEmitIntExt(SrcVT, SrcReg2, DestVT, isZExt);
    if (ResultReg == 0) return false;
    SrcReg2 = ResultReg;
  }

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(SrcReg1).addReg(SrcReg2));

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);
  Type *Ty = CI->getOperand(0)->getType();

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass
                                    : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  unsigned CondReg = isFloat ? ARM::FPSCR : ARM::CPSR;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    EVT DestVT = MVT::i32;
    unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, /*isZExt*/ false);
    if (ResultReg == 0) return false;
    SrcReg = ResultReg;
  }

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;

  // f64->s32 or f32->s32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
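  // ARMMoveToIntReg emits the VMOVRS that gets the bits out of the FP
  // register file and into a GPR.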
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  unsigned CmpOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(1));
  unsigned ResultReg = createResultReg(RC);
  unsigned MovCCOpc = isThumb ? ARM::t2MOVCCr : ARM::MOVCCr;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op1Reg).addReg(Op2Reg)
    .addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectSDiv(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
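  // Only the scalar VFP add/sub/mul forms are selected below.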
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
                                 EVT SrcVT, unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                           Src, /*TODO: Kill=*/false);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
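  // For each assigned location: promote the argument to its location type,
  // then either copy it into its register (splitting f64 into a GPR pair
  // with VMOVRRD where the CC requires it) or store it to the stack.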
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        bool Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                      Arg, ArgVT, Arg);
        assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
        Emitted = true;
        ArgVT = VA.getLocVT();
        break;
      }
      case CCValAssign::ZExt: {
        bool Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                      Arg, ArgVT, Arg);
        assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
        Emitted = true;
        ArgVT = VA.getLocVT();
        break;
      }
      case CCValAssign::AExt: {
        bool Emitted = FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
                                      Arg, ArgVT, Arg);
        if (!Emitted)
          Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                   Arg, ArgVT, Arg);
        if (!Emitted)
          Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                   Arg, ArgVT, Arg);

        assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
        ArgVT = VA.getLocVT();
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
    }
  }
  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
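    // An f64 return arrives in a GPR pair and is recombined with VMOVDRR;
    // everything else is a single-register copy.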
bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}
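
// Sketch of the two-register f64 return path above (soft-float return
// convention, register names assumed): the callee leaves the two halves in
// the locations assigned by CCAssignFnForCall (typically r0/r1), and a
// single
//   VMOVDRR dN, r0, r1
// rebuilds the double into the new result vreg before UpdateValueMap
// publishes it. Scalar results take the plain COPY path instead.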
bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
                   I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    // FIXME: Should be able to handle i1, i8, and/or i16 return types.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;
    // TODO: For now, don't try to handle cases where getLocInfo()
    // says Full but the types don't match.
    if (TLI.getValueType(RV->getType()) != VA.getValVT())
      return false;

    // Make the copy.
    unsigned SrcReg = Reg + VA.getValNo();
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}
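
// For a simple scalar return SelectRet reduces to one copy plus a return,
// e.g. (illustrative vreg name) for "ret i32 %x":
//   COPY r0, %vregX    ; value into the location assigned by the CC
//   BX_RET             ; assembles to "bx lr" (tBX_RET in Thumb mode)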
unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {
  // Darwin needs the r9 versions of the opcodes.
  bool isDarwin = Subtarget->isTargetDarwin();
  if (isThumb) {
    return isDarwin ? ARM::tBLr9 : ARM::tBL;
  } else {
    return isDarwin ? ARM::BLr9 : ARM::BL;
  }
}

// Emit a call to the named libcall, passing the operands of the Instruction
// in I as arguments. We can assume that we can emit a call for any libcall
// we can produce. This is an abridged version of the full call
// infrastructure since we won't need to worry about things like computed
// function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to
// unify with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(NULL);
  if (isThumb)
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addExternalSymbol(TLI.getLibcallName(Call));
  else
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
          .addExternalSymbol(TLI.getLibcallName(Call)));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
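
// Illustrative use of ARMEmitLibcall: callers such as SelectSDiv route i32
// division through here when the subtarget has no hardware divide, so the
// emitted sequence is the usual argument setup followed by something like
//   BL __divsi3        ; symbol name is an assumption; AEABI targets differ
// with the quotient recovered from r0 by FinishCall. The actual symbol
// comes from TLI.getLibcallName(Call).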
bool ARMFastISel::SelectCall(const Instruction *I) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm or worry about intrinsics yet.
  if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;

  // Only handle global variable Callees.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV)
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);

    if (Arg == 0)
      return false;
    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    // FIXME: Should be able to handle i1, i8, and/or i16 parameters.
    if (!isTypeLegal(ArgTy, ArgVT))
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(GV);
  if (isThumb)
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addGlobalAddress(GV, 0, 0);
  else
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
          .addGlobalAddress(GV, 0, 0));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}

unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  // FIXME: We could save an instruction in many cases by special-casing
  // load instructions.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}
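
// Examples of what ARMEmitIntExt produces, read straight off the switch
// above (ARM mode shown; Thumb2 uses the t2 variants):
//   zext i8  -> i32 : UXTB Rd, Rn      sext i8  -> i32 : SXTB Rd, Rn
//   zext i16 -> i32 : UXTH Rd, Rn      sext i16 -> i32 : SXTH Rd, Rn
//   zext i1  -> i32 : AND  Rd, Rn, #1  ; mask, since only bit 0 is defined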
// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectSIToFP(I);
    case Instruction::FPToSI:
      return SelectFPToSI(I);
    case Instruction::FAdd:
      return SelectBinaryOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectSDiv(I);
    case Instruction::SRem:
      return SelectSRem(I);
    case Instruction::Call:
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    default: break;
  }
  return false;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-darwin.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // Darwin only for now, and not Thumb1.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}
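
// Usage note (general FastISel behavior, not specific to this file):
// returning 0 from ARM::createFastISel disables fast-isel for the whole
// function, whereas returning false from TargetSelectInstruction only makes
// the common FastISel driver fall back to SelectionDAG for the instructions
// it could not handle.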