ARMFastISel.cpp revision 35fc62bf70074349d74357900dd65f08384970c5
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
      Base.Reg = 0;
    }
  } Address;
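
// Example (sketch): the two base kinds above correspond to the two ways an
// access is ultimately encoded.  A load from a static alloca is described as
//   { BaseType = FrameIndexBase, Base.FI = <frame index>, Offset = N }
// while a load through a pointer held in a virtual register is
//   { BaseType = RegBase, Base.Reg = <vreg>, Offset = N }.
// ARMComputeAddress() fills one of these in, and ARMSimplifyAddress() later
// rewrites it when Offset does not fit the chosen load/store encoding.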
class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo)
    : FastISel(funcInfo, libInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
  private:
    unsigned FastEmitInst_(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC);
    unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            unsigned Op0, bool Op0IsKill);
    unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             unsigned Op1, bool Op1IsKill);
    unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              unsigned Op2, bool Op2IsKill);
    unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             uint64_t Imm);
    unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             const ConstantFP *FPImm);
    unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              uint64_t Imm);
    unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            uint64_t Imm);
    unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             uint64_t Imm1, uint64_t Imm2);

    unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                        unsigned Op0, bool Op0IsKill,
                                        uint32_t Idx);

    // Backend specific FastISel code.
  private:
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                               const LoadInst *LI);
  private:
  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);
    bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(bool UseReg);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                  bool Return,
                                  bool isVarArg);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool isVarArg);
    unsigned getLibcallReg(const Twine &Name);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes, bool isVarArg);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're a Thumb2 function, or this is not a NEON instruction, it was
  // already handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
      AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyway.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}
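
// Usage sketch: every BuildMI in this file is routed through AddOptionalDefs
// so that predicable instructions get the default predicate operands
// (ARMCC::AL plus a null condition register) and instructions with an
// optional cc_out get a null register, i.e. they do not set flags, e.g.:
//
//   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
//                           TII.get(ARM::ADDri), DestReg)
//                   .addFrameIndex(FI).addImm(0));
//
// emits "add DestReg, <fi>, #0" predicated on AL with no CPSR def.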
unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}
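
// Example (sketch): materializing 1.0f on a VFP3 target takes the fast path
// above, since ARM_AM::getFP32Imm(1.0f) yields a valid 8-bit VFP immediate
// encoding and a single FCONSTS suffices.  A value such as 0.1f has no VFP
// immediate encoding, so it takes the constant-pool VLDRS path instead.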
unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool.  For now 32-bit only.
  if (VT != MVT::i32)
    return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}
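
// Example (sketch): for the i32 constant -2 on a v6t2 target, ~(-2) == 1 is
// a valid so_imm, so a single MVN materializes the value; 0x12345 fails both
// the 16-bit movw test and the MVN test and is loaded from the constant pool.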
unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: Need more magic for ARM PIC.
  if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // Use movw+movt when possible, it avoids constant pool entries.
  // Darwin targets don't support movt with Reloc::Static, see
  // ARMTargetLowering::LowerGlobalAddressDarwin.  Other targets only support
  // static movt relocations.
  if (Subtarget->useMovt() &&
      Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) {
    unsigned Opc;
    switch (RelocM) {
    case Reloc::PIC_:
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
      break;
    case Reloc::DynamicNoPIC:
      Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn;
      break;
    default:
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
      break;
    }
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg).addGlobalAddress(GV));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = TD.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = TD.getTypeAllocSize(GV->getType());
    }

    // Grab index.
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
      (Subtarget->isThumb() ? 4 : 8);
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
        .addConstantPoolIndex(Idx);
      if (RelocM == Reloc::PIC_)
        MIB.addImm(Id);
    } else {
      // The extra immediate is for addrmode2.
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                    DestReg)
        .addConstantPoolIndex(Idx)
        .addImm(0);
    }
    AddOptionalDefs(MIB);
  }

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    break;
    case Instruction::BitCast: {
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    }
    case Instruction::IntToPtr: {
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::PtrToInt: {
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (isa<AddOperator>(Op) &&
                (!isa<Instruction>(Op) ||
                 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
                 == FuncInfo.MBB) &&
                isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add (in the same block) with a constant operand. Fold the
              // constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}
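
// Example (sketch): for "getelementptr i32* %p, i32 3" the loop above folds
// the constant index into TmpOffset (3 * 4 bytes) and then recurses on %p,
// yielding { RegBase, Base.Reg = vreg(%p), Offset = 12 } without emitting
// any add instruction.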
void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ?
      (const TargetRegisterClass*)&ARM::tGPRRegClass :
      (const TargetRegisterClass*)&ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}
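
// Example (sketch): an i32 access at offset 4100 fails the 12-bit immediate
// check above (and is not a small negative Thumb2 offset), so the base and
// offset are folded into a fresh base register via FastEmit_ri_(ISD::ADD)
// and Addr.Offset is reset to 0.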
void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        Flags,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = &ARM::GPRRegClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned loads need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert (ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load.  Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ?
        (const TargetRegisterClass*)&ARM::tGPRRegClass :
        (const TargetRegisterClass*)&ARM::GPRRegClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::VMOVRS), MoveReg)
                        .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      } else {
        StrOpc = ARM::VSTRS;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned stores need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}
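
// Example (sketch): ICMP_SLT maps to ARMCC::LT, so a conditional branch on
// "icmp slt" becomes a CMP followed by Bcc/t2Bcc with the LT predicate.
// FCMP_ONE and FCMP_UEQ would each need two compares, so they fall into the
// default case and are rejected by returning ARMCC::AL ("always").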
bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
      .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
  .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
                  .addReg(AddrReg));
  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcVT = TLI.getValueType(Ty, true);
  if (!SrcVT.isSimple()) return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
      // than a cmn, because there is no way to represent 2147483648 as a
      // signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
    // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  if (!UseImm) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(CmpOpc))
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
      .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}
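
// Example (sketch): for "icmp eq i32 %x, -7" the code above flips the
// immediate to 7 and sets isNegativeImm, selecting CMN (compare negative);
// comparing against 0x80000000 keeps the plain CMP form because +2147483648
// is not representable as a signed 32-bit immediate.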
1492 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1493 return false; 1494 1495 // Now set a register based on the comparison. Explicitly set the predicates 1496 // here. 1497 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1498 const TargetRegisterClass *RC = isThumb2 ? 1499 (const TargetRegisterClass*)&ARM::rGPRRegClass : 1500 (const TargetRegisterClass*)&ARM::GPRRegClass; 1501 unsigned DestReg = createResultReg(RC); 1502 Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0); 1503 unsigned ZeroReg = TargetMaterializeConstant(Zero); 1504 // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR. 1505 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg) 1506 .addReg(ZeroReg).addImm(1) 1507 .addImm(ARMPred).addReg(ARM::CPSR); 1508 1509 UpdateValueMap(I, DestReg); 1510 return true; 1511} 1512 1513bool ARMFastISel::SelectFPExt(const Instruction *I) { 1514 // Make sure we have VFP and that we're extending float to double. 1515 if (!Subtarget->hasVFP2()) return false; 1516 1517 Value *V = I->getOperand(0); 1518 if (!I->getType()->isDoubleTy() || 1519 !V->getType()->isFloatTy()) return false; 1520 1521 unsigned Op = getRegForValue(V); 1522 if (Op == 0) return false; 1523 1524 unsigned Result = createResultReg(&ARM::DPRRegClass); 1525 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1526 TII.get(ARM::VCVTDS), Result) 1527 .addReg(Op)); 1528 UpdateValueMap(I, Result); 1529 return true; 1530} 1531 1532bool ARMFastISel::SelectFPTrunc(const Instruction *I) { 1533 // Make sure we have VFP and that we're truncating double to float. 1534 if (!Subtarget->hasVFP2()) return false; 1535 1536 Value *V = I->getOperand(0); 1537 if (!(I->getType()->isFloatTy() && 1538 V->getType()->isDoubleTy())) return false; 1539 1540 unsigned Op = getRegForValue(V); 1541 if (Op == 0) return false; 1542 1543 unsigned Result = createResultReg(&ARM::SPRRegClass); 1544 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1545 TII.get(ARM::VCVTSD), Result) 1546 .addReg(Op)); 1547 UpdateValueMap(I, Result); 1548 return true; 1549} 1550 1551bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) { 1552 // Make sure we have VFP. 1553 if (!Subtarget->hasVFP2()) return false; 1554 1555 MVT DstVT; 1556 Type *Ty = I->getType(); 1557 if (!isTypeLegal(Ty, DstVT)) 1558 return false; 1559 1560 Value *Src = I->getOperand(0); 1561 EVT SrcVT = TLI.getValueType(Src->getType(), true); 1562 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 1563 return false; 1564 1565 unsigned SrcReg = getRegForValue(Src); 1566 if (SrcReg == 0) return false; 1567 1568 // Handle sign-extension. 1569 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) { 1570 EVT DestVT = MVT::i32; 1571 SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, 1572 /*isZExt*/!isSigned); 1573 if (SrcReg == 0) return false; 1574 } 1575 1576 // The conversion routine works on fp-reg to fp-reg and the operand above 1577 // was an integer, move it to the fp registers if possible. 1578 unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg); 1579 if (FP == 0) return false; 1580 1581 unsigned Opc; 1582 if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS; 1583 else if (Ty->isDoubleTy()) Opc = isSigned ? 
ARM::VSITOD : ARM::VUITOD; 1584 else return false; 1585 1586 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT)); 1587 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 1588 ResultReg) 1589 .addReg(FP)); 1590 UpdateValueMap(I, ResultReg); 1591 return true; 1592} 1593 1594bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) { 1595 // Make sure we have VFP. 1596 if (!Subtarget->hasVFP2()) return false; 1597 1598 MVT DstVT; 1599 Type *RetTy = I->getType(); 1600 if (!isTypeLegal(RetTy, DstVT)) 1601 return false; 1602 1603 unsigned Op = getRegForValue(I->getOperand(0)); 1604 if (Op == 0) return false; 1605 1606 unsigned Opc; 1607 Type *OpTy = I->getOperand(0)->getType(); 1608 if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS; 1609 else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD; 1610 else return false; 1611 1612 // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg. 1613 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32)); 1614 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 1615 ResultReg) 1616 .addReg(Op)); 1617 1618 // This result needs to be in an integer register, but the conversion only 1619 // takes place in fp-regs. 1620 unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg); 1621 if (IntReg == 0) return false; 1622 1623 UpdateValueMap(I, IntReg); 1624 return true; 1625} 1626 1627bool ARMFastISel::SelectSelect(const Instruction *I) { 1628 MVT VT; 1629 if (!isTypeLegal(I->getType(), VT)) 1630 return false; 1631 1632 // Things need to be register sized for register moves. 1633 if (VT != MVT::i32) return false; 1634 const TargetRegisterClass *RC = TLI.getRegClassFor(VT); 1635 1636 unsigned CondReg = getRegForValue(I->getOperand(0)); 1637 if (CondReg == 0) return false; 1638 unsigned Op1Reg = getRegForValue(I->getOperand(1)); 1639 if (Op1Reg == 0) return false; 1640 1641 // Check to see if we can use an immediate in the conditional move. 1642 int Imm = 0; 1643 bool UseImm = false; 1644 bool isNegativeImm = false; 1645 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) { 1646 assert (VT == MVT::i32 && "Expecting an i32."); 1647 Imm = (int)ConstInt->getValue().getZExtValue(); 1648 if (Imm < 0) { 1649 isNegativeImm = true; 1650 Imm = ~Imm; 1651 } 1652 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 1653 (ARM_AM::getSOImmVal(Imm) != -1); 1654 } 1655 1656 unsigned Op2Reg = 0; 1657 if (!UseImm) { 1658 Op2Reg = getRegForValue(I->getOperand(2)); 1659 if (Op2Reg == 0) return false; 1660 } 1661 1662 unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri; 1663 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc)) 1664 .addReg(CondReg).addImm(0)); 1665 1666 unsigned MovCCOpc; 1667 if (!UseImm) { 1668 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr; 1669 } else { 1670 if (!isNegativeImm) { 1671 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1672 } else { 1673 MovCCOpc = isThumb2 ? 

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  if (!UseImm) {
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    if (!isNegativeImm) {
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    } else {
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
    }
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss, go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}
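
// A binary operation on a non-legal type typically lands in SelectBinaryIntOp
// because the target-independent selector gave up on it. For example, for
//   %c = add i8 %a, %b
// we just emit a full 32-bit ADDrr; only the low 8 bits of the result are
// meaningful, and any consumer needing defined high bits must re-extend.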
bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
  EVT DestVT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we have a binary operation on a non-legal
  // type and the target independent selector doesn't know how to handle it.
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned Opc;
  switch (ISDOpcode) {
    default: return false;
    case ISD::ADD:
      Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
      break;
    case ISD::OR:
      Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
      break;
    case ISD::SUB:
      Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
      break;
  }

  unsigned SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  // TODO: Often the 2nd operand is an immediate, which can be encoded directly
  // in the instruction, rather than materializing the value in a register.
  unsigned SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(SrcReg1).addReg(SrcReg2));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode
// TODO: We may not support all of this.
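// For example, on an AAPCS target with VFP2 and a hard-float ABI, a
// non-variadic C call gets the VFP variant (FP arguments in s0-s15/d0-d7),
// while a variadic call falls back to the base AAPCS convention, which passes
// FP arguments in core registers.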
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
                                           bool Return,
                                           bool isVarArg) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    if (!isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
    // Fall through to soft float variant, variadic functions don't
    // use hard floating point ABI.
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::GHC:
    if (Return)
      llvm_unreachable("Can't return in GHC call convention");
    else
      return CC_ARM_APCS_GHC;
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes,
                                  bool isVarArg) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
                             CCAssignFnForCall(CC, false, isVarArg));

  // Check that we can handle all of the arguments. If we can't, then bail out
  // now before we add code to the MBB.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      continue;
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64 ||
          // TODO: Only handle register args for now.
          !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
        return false;
    } else {
      switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) {
      default:
        return false;
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
        break;
      case MVT::f32:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      case MVT::f64:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      }
    }
  }

  // At this point, we are able to handle the call's arguments in fast isel.

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));
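
  // On ARM the call frame setup opcode is the ADJCALLSTACKDOWN pseudo, which
  // frame lowering later rewrites into an actual SP adjustment (roughly
  // 'sub sp, sp, #NumBytes') or folds away when possible.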

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
           "We don't handle NEON/vector parameters yet.");

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
        assert(Arg != 0 && "Failed to emit a sext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::AExt:
        // Intentional fall-through. Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
        assert(Arg != 0 && "Failed to emit a zext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      assert(VA.getLocVT() == MVT::f64 &&
             "Custom lowering for v2f64 args not available");

      CCValAssign &NextVA = ArgLocs[++i];

      assert(VA.isRegLoc() && NextVA.isRegLoc() &&
             "We only handle register args!");

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
      assert(EmitRet && "Could not emit a store for argument!");
    }
  }

  return true;
}
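
// FinishCall mirrors the f64 handling in ProcessCallArgs: under a soft-float
// return convention an f64 comes back split across two core registers
// (typically r0/r1), and VMOVDRR glues them back into a D register, just as
// VMOVRRD split the value when it was passed as an argument.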
bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes, bool isVarArg) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
                   I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
                                                 F.isVarArg()));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVVT = TLI.getValueType(RV->getType());
    EVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      // Perform extension if flagged as either zext or sext. Otherwise, do
      // nothing.
      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
        if (SrcReg == 0) return false;
      }
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
  if (UseReg)
    return isThumb2 ? ARM::tBLXr : ARM::BLX;
  else
    return isThumb2 ? ARM::tBL : ARM::BL;
}
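
// Under -arm-long-calls the callee address is materialized into a register
// (via ARMMaterializeGV, typically through a constant-pool load) and called
// with BLX, so the call is not limited to the roughly +/-32MB displacement
// range of a direct BL.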
unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
  GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
                                       GlobalValue::ExternalLinkage, 0, Name);
  return ARMMaterializeGV(GV, TLI.getValueType(GV->getType()));
}

// A quick helper that emits a call to the named libcall for the Instruction
// in I, passing along the instruction's operands as arguments. We can assume
// that we can emit a call for any libcall we can produce. This is an abridged
// version of the full call infrastructure since we won't need to worry about
// things like computed function pointers or strange arguments at call sites.
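// For example, SelectDiv on a subtarget without hardware divide passes
// RTLIB::SDIV_I32 here, which becomes a call to the runtime divide helper
// (typically __aeabi_idiv on AEABI targets, __divsi3 elsewhere).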
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, false))
    return false;

  unsigned CalleeReg = 0;
  if (EnableARMLongCalls) {
    CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(CallOpc));
  if (isThumb2) {
    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
    if (EnableARMLongCalls)
      MIB.addReg(CalleeReg);
    else
      MIB.addExternalSymbol(TLI.getLibcallName(Call));
  } else {
    if (EnableARMLongCalls)
      MIB.addReg(CalleeReg);
    else
      MIB.addExternalSymbol(TLI.getLibcallName(Call));

    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
  }
  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
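
// SelectCall also serves as the fallback for the memcpy/memmove/memset
// intrinsics: when IntrMemName is non-null it names the libc routine to call,
// and the intrinsic's trailing align and isvolatile operands are skipped when
// the argument list is built below.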
bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = 0) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  bool isVarArg = FTy->isVarArg();

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
      RetVT != MVT::i16 && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  unsigned arg_size = CS.arg_size();
  Args.reserve(arg_size);
  ArgRegs.reserve(arg_size);
  ArgVTs.reserve(arg_size);
  ArgFlags.reserve(arg_size);
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last two arguments, which shouldn't be passed to the underlying function.
    if (IntrMemName && e-i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, isVarArg))
    return false;

  bool UseReg = false;
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || EnableARMLongCalls) UseReg = true;

  unsigned CalleeReg = 0;
  if (UseReg) {
    if (IntrMemName)
      CalleeReg = getLibcallReg(IntrMemName);
    else
      CalleeReg = getRegForValue(Callee);

    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(UseReg);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(CallOpc));
  if (isThumb2) {
    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
    if (UseReg)
      MIB.addReg(CalleeReg);
    else if (!IntrMemName)
      MIB.addGlobalAddress(GV, 0, 0);
    else
      MIB.addExternalSymbol(IntrMemName, 0);
  } else {
    if (UseReg)
      MIB.addReg(CalleeReg);
    else if (!IntrMemName)
      MIB.addGlobalAddress(GV, 0, 0);
    else
      MIB.addExternalSymbol(IntrMemName, 0);

    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
  }

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
    return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}
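
// The 16-byte cap keeps the unrolled copy to a handful of load/store pairs
// (e.g. a 7-byte memcpy becomes an i32, an i16 and an i8 pair); anything
// larger falls through to the memcpy libcall path in SelectIntrinsicCall.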
bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!ARMIsMemCpySmall(Len))
    return false;

  // We don't care about alignment here since we just emit integer accesses.
  while (Len) {
    MVT VT;
    if (Len >= 4)
      VT = MVT::i32;
    else if (Len >= 2)
      VT = MVT::i16;
    else {
      assert(Len == 1);
      VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits()/8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}

bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
    MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
    MFI->setFrameAddressIsTaken(true);

    unsigned LdrOpc;
    const TargetRegisterClass *RC;
    if (isThumb2) {
      LdrOpc = ARM::t2LDRi12;
      RC = (const TargetRegisterClass*)&ARM::tGPRRegClass;
    } else {
      LdrOpc = ARM::LDRi12;
      RC = (const TargetRegisterClass*)&ARM::GPRRegClass;
    }

    const ARMBaseRegisterInfo *RegInfo =
      static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo());
    unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
    unsigned SrcReg = FramePtr;

    // Recursively load frame address
    // ldr r0 [fp]
    // ldr r0 [r0]
    // ldr r0 [r0]
    // ...
    unsigned DestReg;
    unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
    while (Depth--) {
      DestReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(LdrOpc), DestReg)
                      .addReg(SrcReg).addImm(0));
      SrcReg = DestReg;
    }
    UpdateValueMap(&I, SrcReg);
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpy's are common enough that we want to do them without a
      // call if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  case Intrinsic::trap: {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::TRAP));
    return true;
  }
  }
}
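
// A truncate to a narrower integer is free at this level. For example,
//   %b = trunc i32 %a to i8
// emits no instructions: %b simply reuses %a's register, and only the low
// 8 bits are treated as defined from then on.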
bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}
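
// In ARMEmitIntExt below, the trailing immediate operand means different
// things for the different opcodes: it is the rotate amount (always ror #0
// here) for the UXT/SXT instructions, but the mask for the i1 zero-extension
// (and Rd, Rn, #1). There is no cheap i1 sign-extension equivalent, so that
// case reports failure.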
unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}
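
// SelectShift (ARM mode only) folds a constant shift amount into the move:
// e.g. 'shl i32 %a, 3' selects MOVsi with the amount encoded in the shifter
// operand ('mov r0, r1, lsl #3'), while a variable amount selects MOVsr
// ('mov r0, r1, lsl r2'). Register names are illustrative.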
bool ARMFastISel::SelectShift(const Instruction *I,
                              ARM_AM::ShiftOpc ShiftTy) {
  // Thumb2 mode is handled by the target-independent selector or by
  // SelectionDAG ISel.
  if (isThumb2)
    return false;

  // Only handle i32 now.
  EVT DestVT = TLI.getValueType(I->getType(), true);
  if (DestVT != MVT::i32)
    return false;

  unsigned Opc = ARM::MOVsr;
  unsigned ShiftImm;
  Value *Src2Value = I->getOperand(1);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
    ShiftImm = CI->getZExtValue();

    // Fall back to selection DAG isel if the shift amount
    // is zero or greater than the width of the value type.
    if (ShiftImm == 0 || ShiftImm >= 32)
      return false;

    Opc = ARM::MOVsi;
  }

  Value *Src1Value = I->getOperand(0);
  unsigned Reg1 = getRegForValue(Src1Value);
  if (Reg1 == 0) return false;

  unsigned Reg2;
  if (Opc == ARM::MOVsr) {
    Reg2 = getRegForValue(Src2Value);
    if (Reg2 == 0) return false;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  if (ResultReg == 0) return false;

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg)
                            .addReg(Reg1);

  if (Opc == ARM::MOVsi)
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
  else if (Opc == ARM::MOVsr) {
    MIB.addReg(Reg2);
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
  }

  AddOptionalDefs(MIB);
  UpdateValueMap(I, ResultReg);
  return true;
}

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {

  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    case Instruction::Shl:
      return SelectShift(I, ARM_AM::lsl);
    case Instruction::LShr:
      return SelectShift(I, ARM_AM::lsr);
    case Instruction::AShr:
      return SelectShift(I, ARM_AM::asr);
    default: break;
  }
  return false;
}

/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                                const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  //   ldrb r1, [r0]       ldrb r1, [r0]
  //   uxtb r2, r1     =>
  //   mov  r3, r2         mov  r3, r1
  bool isZExt = true;
  switch (MI->getOpcode()) {
    default: return false;
    case ARM::SXTH:
    case ARM::t2SXTH:
      isZExt = false;
    case ARM::UXTH:
    case ARM::t2UXTH:
      if (VT != MVT::i16)
        return false;
      break;
    case ARM::SXTB:
    case ARM::t2SXTB:
      isZExt = false;
    case ARM::UXTB:
    case ARM::t2UXTB:
      if (VT != MVT::i8)
        return false;
      break;
  }
  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}

namespace llvm {
  FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
                                const TargetLibraryInfo *libInfo) {
    // Completely untested on non-iOS.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // iOS only for now, and not Thumb1.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only())
      return new ARMFastISel(funcInfo, libInfo);
    return 0;
  }
}