ARMFastISel.cpp revision c54f6348861517398f17e85f41b30c4dd079fc3d
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
      Base.Reg = 0;
    }
  } Address;
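
  // For example: a static alloca is described as BaseType == FrameIndexBase
  // with Base.FI holding its stack slot, while a pointer already living in a
  // vreg uses BaseType == RegBase with Base.Reg set; Offset accumulates any
  // constant displacement folded in by ARMComputeAddress below.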

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      unsigned Op2, bool Op2IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm1, uint64_t Imm2);

    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                               const LoadInst *LI);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);

    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(const GlobalValue *GV);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If this is a Thumb2 function or not a NEON instruction, it was already
  // handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
      AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyway.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefs in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}
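
// Each FastEmitInst_* below mirrors its target-independent FastISel
// counterpart; the ARM-specific twist is simply that every instruction built
// here is routed through AddOptionalDefs so it carries the default predicate
// (and, where applicable, the optional CC) operands.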

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant; otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return false;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
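  // For example, any constant that fits in 16 bits (say, 42) becomes a single
  // "movw r0, #42" via MOVi16/t2MOVi16 on v6t2 and later, with no
  // constant-pool load at all.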
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool. For now 32-bit only.
  if (VT != MVT::i32)
    return false;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: Need more magic for ARM PIC.
  if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // Use movw+movt when possible; it avoids constant pool entries.
  // Darwin targets don't support movt with Reloc::Static, see
  // ARMTargetLowering::LowerGlobalAddressDarwin. Other targets only support
  // static movt relocations.
  if (Subtarget->useMovt() &&
      Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) {
    unsigned Opc;
    switch (RelocM) {
    case Reloc::PIC_:
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
      break;
    case Reloc::DynamicNoPIC:
      Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn;
      break;
    default:
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
      break;
    }
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg).addGlobalAddress(GV));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = TD.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = TD.getTypeAllocSize(GV->getType());
    }

    // Grab index.
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
      (Subtarget->isThumb() ? 4 : 8);
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
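    // (The PCAdj of 4 for Thumb vs. 8 for ARM chosen above reflects how far
    // the PC has advanced by the time a pc-relative operand is read.)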
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
            .addConstantPoolIndex(Idx);
      if (RelocM == Reloc::PIC_)
        MIB.addImm(Id);
    } else {
      // The extra immediate is for addrmode2.
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                    DestReg)
            .addConstantPoolIndex(Idx)
            .addImm(0);
    }
    AddOptionalDefs(MIB);
  }

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return false;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
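  // For example, "getelementptr i32* %p, i32 2" is folded below into
  // Base.Reg = %p with Offset += 8 (2 * sizeof(i32)), so no add instruction
  // has to be emitted for the address arithmetic.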
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast: {
    // Look through bitcasts.
    return ARMComputeAddress(U->getOperand(0), Addr);
  }
  case Instruction::IntToPtr: {
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::PtrToInt: {
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::GetElementPtr: {
    Address SavedAddr = Addr;
    int TmpOffset = Addr.Offset;

    // Iterate through the GEP folding the constants into offsets where
    // we can.
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      const Value *Op = *i;
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
        for (;;) {
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
            break;
          }
          if (isa<AddOperator>(Op) &&
              (!isa<Instruction>(Op) ||
               FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
               == FuncInfo.MBB) &&
              isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
            // An add (in the same block) with a constant operand. Fold the
            // constant.
            ConstantInt *CI =
              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Iterate on the other operand.
            Op = cast<AddOperator>(Op)->getOperand(0);
            continue;
          }
          // Unsupported
          goto unsupported_gep;
        }
      }
    }

    // Try to grab the base operand now.
    Addr.Offset = TmpOffset;
    if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

    // We failed, restore everything and try the other options.
    Addr = SavedAddr;

    unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.BaseType = Address::FrameIndexBase;
      Addr.Base.FI = SI->second;
      return true;
    }
    break;
  }
  }

  // Try to get this in a register if nothing else has worked.
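  // (This is the catch-all: e.g. a global or a function argument used as a
  // plain pointer is materialized here and becomes the base register.)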
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unhandled load/store type!");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    if (!useAM3) {
      // Integer loads/stores handle 12-bit offsets.
      needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
      // Handle negative offsets.
      if (needsLowering && isThumb2)
        needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                          Addr.Offset > -256);
    } else {
      // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
      needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
    }
    break;
  case MVT::f32:
  case MVT::f64:
    // Floating point operands handle 8-bit offsets.
    needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
    break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass
                                             : ARM::GPRRegisterClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 expects the offset already divided by 4; the selection DAG
  // addressing does the same division and later multiplies it back. Do this
  // here as well.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        Flags,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i1:
  case MVT::i8:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
      else
        Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
    } else {
      if (isZExt) {
        Opc = ARM::LDRBi12;
      } else {
        Opc = ARM::LDRSB;
        useAM3 = true;
      }
    }
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i16:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
      else
        Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
    } else {
      Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
      useAM3 = true;
    }
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::i32:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        Opc = ARM::t2LDRi8;
      else
        Opc = ARM::t2LDRi12;
    } else {
      Opc = ARM::LDRi12;
    }
    RC = ARM::GPRRegisterClass;
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    // Unaligned loads need special handling. Floats require word-alignment.
    if (Alignment && Alignment < 4) {
      needVMOV = true;
      VT = MVT::i32;
      Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
      RC = ARM::GPRRegisterClass;
    } else {
      Opc = ARM::VLDRS;
      RC = TLI.getRegClassFor(VT);
    }
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    // FIXME: Unaligned loads need special handling. Doublewords require
    // word-alignment.
    if (Alignment && Alignment < 4)
      return false;

    Opc = ARM::VLDRD;
    RC = TLI.getRegClassFor(VT);
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load. Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
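  // Note that isLoadTypeLegal also lets i1/i8/i16 through; ARMEmitLoad
  // widens those with the extending load opcodes selected above.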
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.getSimpleVT().SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i1: {
    unsigned Res = createResultReg(isThumb2 ? ARM::tGPRRegisterClass :
                                              ARM::GPRRegisterClass);
    unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), Res)
                    .addReg(SrcReg).addImm(1));
    SrcReg = Res;
  } // Fallthrough here.
  case MVT::i8:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        StrOpc = ARM::t2STRBi8;
      else
        StrOpc = ARM::t2STRBi12;
    } else {
      StrOpc = ARM::STRBi12;
    }
    break;
  case MVT::i16:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        StrOpc = ARM::t2STRHi8;
      else
        StrOpc = ARM::t2STRHi12;
    } else {
      StrOpc = ARM::STRH;
      useAM3 = true;
    }
    break;
  case MVT::i32:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        StrOpc = ARM::t2STRi8;
      else
        StrOpc = ARM::t2STRi12;
    } else {
      StrOpc = ARM::STRi12;
    }
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    // Unaligned stores need special handling. Floats require word-alignment.
    if (Alignment && Alignment < 4) {
      unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRS), MoveReg)
                      .addReg(SrcReg));
      SrcReg = MoveReg;
      VT = MVT::i32;
      StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
    } else {
      StrOpc = ARM::VSTRS;
    }
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    // FIXME: Unaligned stores need special handling. Doublewords require
    // word-alignment.
    if (Alignment && Alignment < 4)
      return false;

    StrOpc = ARM::VSTRD;
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
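  // Stores reuse the same GEP/alloca folding as loads via ARMComputeAddress.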
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
  // Needs two compares...
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UEQ:
  default:
    // AL is our "false" for now. The other two need more compares.
    return ARMCC::AL;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    return ARMCC::EQ;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    return ARMCC::GT;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    return ARMCC::GE;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    return ARMCC::HI;
  case CmpInst::FCMP_OLT:
    return ARMCC::MI;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    return ARMCC::LS;
  case CmpInst::FCMP_ORD:
    return ARMCC::VC;
  case CmpInst::FCMP_UNO:
    return ARMCC::VS;
  case CmpInst::FCMP_UGE:
    return ARMCC::PL;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    return ARMCC::LT;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    return ARMCC::LE;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    return ARMCC::NE;
  case CmpInst::ICMP_UGE:
    return ARMCC::HS;
  case CmpInst::ICMP_ULT:
    return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}
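
// Indirect branches are much simpler: the target address is already a value,
// so it only needs a register and a BX (tBRIND for Thumb), handled below.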

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
                  .addReg(AddrReg));
  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcVT = TLI.getValueType(Ty, true);
  if (!SrcVT.isSimple()) return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      if (Imm < 0) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return false;
  // TODO: Verify compares.
  case MVT::f32:
    isICmp = false;
    CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
    break;
  case MVT::f64:
    isICmp = false;
    CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    needsExt = true;
  // Intentional fall-through.
  case MVT::i32:
    if (isThumb2) {
      if (!UseImm)
        CmpOpc = ARM::t2CMPrr;
      else
        CmpOpc = isNegativeImm ? ARM::t2CMNzri : ARM::t2CMPri;
    } else {
      if (!UseImm)
        CmpOpc = ARM::CMPrr;
      else
        CmpOpc = isNegativeImm ? ARM::CMNzri : ARM::CMPri;
    }
    break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  if (!UseImm) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(CmpOpc))
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
      .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);
  Type *Ty = CI->getOperand(0)->getType();

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass
                                           : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  unsigned CondReg = isFloat ? ARM::FPSCR : ARM::CPSR;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}
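
// fpext/fptrunc below are each a single VCVT between the S and D register
// files (VCVTDS/VCVTSD), guarded by the usual VFP2 check.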

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    EVT DestVT = MVT::i32;
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT,
                           /*isZExt*/!isSigned);
    if (SrcReg == 0) return false;
  }

  // The conversion routine works on fp-reg to fp-reg, and the operand above
  // was an integer; move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}
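
// Illustration of the path above: "sitofp i16 %x to float" expands to a sign
// extend to i32, a VMOVSR into an S register, and then a VSITOS.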

bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  if (!UseImm) {
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    if (!isNegativeImm) {
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    } else {
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
    }
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}
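
// SelectDiv below lowers integer division to a runtime call when there is no
// hardware divide; the RTLIB entry resolves to an ABI-specific helper (for
// example __aeabi_idiv or __divsi3 for signed i32, depending on the target).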
bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
  EVT DestVT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we have a binary operation on a non-legal
  // type and the target-independent selector doesn't know how to handle it.
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned Opc;
  switch (ISDOpcode) {
    default: return false;
    case ISD::ADD:
      Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
      break;
    case ISD::OR:
      Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
      break;
    case ISD::SUB:
      Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
      break;
  }

  unsigned SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  // TODO: Often the 2nd operand is an immediate, which can be encoded directly
  // in the instruction, rather than materializing the value in a register.
  unsigned SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(SrcReg1).addReg(SrcReg2));
  UpdateValueMap(I, ResultReg);
  return true;
}
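
// For illustration: an 'add i8 %a, %b' that the DAG selector punted on is
// emitted above as a full 32-bit add (e.g. 'add r2, r0, r1'); leaving the
// high bits undefined is acceptable since consumers of the narrow type must
// extend before relying on them.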
bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but haven't figured out how to do so yet. Just use the vfp
  // instructions if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  }
}
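
// For illustration: on an AAPCS target with VFP2 and a hard-float ABI the
// dispatch above yields CC_ARM_AAPCS_VFP, so float/double arguments are
// assigned to s0-s15/d0-d7 rather than to the core registers r0-r3.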
bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
        assert(Arg != 0 && "Failed to emit a sext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::AExt:
        // Intentional fall-through. Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
        assert(Arg != 0 && "Failed to emit a zext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
    }
  }
  return true;
}
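
// For illustration: arguments that the CC analysis cannot fit in r0-r3 come
// back as mem locs and are stored SP-relative by the loop above, so a fifth
// i32 argument would typically end up as 'str rX, [sp, #0]'.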
bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
                   I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVVT = TLI.getValueType(RV->getType());
    EVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      // Perform extension if flagged as either zext or sext. Otherwise, do
      // nothing.
      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
        if (SrcReg == 0) return false;
      }
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}
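
// For illustration: 'ret i8 %v' from a function whose return is marked
// signext is handled above roughly as
//   sxtb r0, rV
//   bx   lr
// with the extension coming from ARMEmitIntExt and the copy into r0 dictated
// by the calling convention.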
unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {
  // iOS needs the r9 versions of the opcodes.
  bool isiOS = Subtarget->isTargetIOS();
  if (isThumb2) {
    return isiOS ? ARM::tBLr9 : ARM::tBL;
  } else {
    return isiOS ? ARM::BLr9 : ARM::BL;
  }
}

// Quickly emit a call to the named libcall Call, with the operands of the
// Instruction I as its arguments. We can assume that we can emit a call for
// any libcall we can produce. This is an abridged version of the full call
// infrastructure since we won't need to worry about things like computed
// function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for iOS, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(NULL);
  if (isThumb2)
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addExternalSymbol(TLI.getLibcallName(Call));
  else
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
                         .addExternalSymbol(TLI.getLibcallName(Call)));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
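
// For illustration: an 'sdiv i32' reaching SelectDiv on a subtarget without
// hardware divide funnels into ARMEmitLibcall above and becomes a plain call
// to the divide libcall - typically __aeabi_idiv under AAPCS and __divsi3
// otherwise, whichever name TLI.getLibcallName() reports.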
bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = 0) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Only handle global variable Callees.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV)
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  unsigned arg_size = CS.arg_size();
  Args.reserve(arg_size);
  ArgRegs.reserve(arg_size);
  ArgVTs.reserve(arg_size);
  ArgFlags.reserve(arg_size);
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last two arguments, which shouldn't be passed to the underlying function.
    if (IntrMemName && e-i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }
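
  // For illustration: a parameter declared 'signext i8' gets its SExt flag
  // set in the loop above, and ProcessCallArgs will then widen it (sxtb) to
  // i32 before copying it into its argument register.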
  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for iOS, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(GV);
  if (isThumb2) {
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)));
    if (!IntrMemName)
      MIB.addGlobalAddress(GV, 0, 0);
    else
      MIB.addExternalSymbol(IntrMemName, 0);
  } else {
    if (!IntrMemName)
      // Explicitly adding the predicate here.
      MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                   TII.get(CallOpc))
                           .addGlobalAddress(GV, 0, 0));
    else
      MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                   TII.get(CallOpc))
                           .addExternalSymbol(IntrMemName, 0));
  }

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len) {
  // Make sure we don't bloat code by inlining very large memcpys.
  if (!ARMIsMemCpySmall(Len))
    return false;

  // We don't care about alignment here since we just emit integer accesses.
  while (Len) {
    MVT VT;
    if (Len >= 4)
      VT = MVT::i32;
    else if (Len >= 2)
      VT = MVT::i16;
    else {
      assert(Len == 1);
      VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits()/8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}
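
// For illustration: an 11-byte constant-length memcpy unrolls above into
// ldr/str, ldr/str, ldrh/strh, ldrb/strb (4+4+2+1 bytes), with no alignment
// fixups since only integer loads and stores are emitted.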
bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpys are common enough that we want to do them without a call
      // if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  }
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}

unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}
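
// For illustration: ARMEmitIntExt typically boils down to
//   uxtb rD, rS          ; zext i8  -> i32
//   and  rD, rS, #1      ; zext i1  -> i32 (the bool-zext special case)
//   sxth rD, rS          ; sext i16 -> i32
// where the uxt/sxt forms require ARMv6; Thumb2 uses the t2 variants of the
// same operations.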
// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    default: break;
  }
  return false;
}
/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                                const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  // ldrb r1, [r0]       ldrb r1, [r0]
  // uxtb r2, r1     =>
  // mov  r3, r2         mov  r3, r1
  bool isZExt = true;
  switch (MI->getOpcode()) {
    default: return false;
    case ARM::SXTH:
    case ARM::t2SXTH:
      isZExt = false;
      // Fall through to the i16 check.
    case ARM::UXTH:
    case ARM::t2UXTH:
      if (VT != MVT::i16)
        return false;
      break;
    case ARM::SXTB:
    case ARM::t2SXTB:
      isZExt = false;
      // Fall through to the i8 check.
    case ARM::UXTB:
    case ARM::t2UXTB:
      if (VT != MVT::i8)
        return false;
      break;
  }
  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-iOS.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // iOS only for now; Thumb1-only subtargets are not supported.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}
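
// For illustration: this factory is how fast-isel gets plugged in for ARM;
// the target's createFastISel hook (ARMTargetLowering::createFastISel is
// expected to simply forward here) supplies the selector, and a null return
// makes the common FastISel driver fall back to full SelectionDAG
// instruction selection.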