ARMFastISel.cpp revision 2946549a2817681f9117662139cc0f2241939965
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
      Base.Reg = 0;
    }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
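    // Note: the FastEmitInst_* overrides below mirror the base FastISel
    // emitters for one reason: every instruction built here is routed through
    // AddOptionalDefs (defined further down) so the default ARM predicate
    // operands, and the optional CC-register def when the opcode has one,
    // get appended.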
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      unsigned Op2, bool Op2IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm1, uint64_t Imm2);

    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                               const LoadInst *LI);

   #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);
    bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(bool UseReg);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                  bool Return,
                                  bool isVarArg);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool isVarArg);
    unsigned getLibcallReg(const Twine &Name);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes, bool isVarArg);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If this is a Thumb2 function or a non-NEON instruction, it was already
  // handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine instruction is predicable, go ahead and add the predicate
// operands; if it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for a description of why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyway.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate? CPSR is set iff the optional def is
  // CPSR; all other OptionalDefs in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return false;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool. For now 32-bit only.
  if (VT != MVT::i32)
    return false;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: Need more magic for ARM PIC.
  if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // Use movw+movt when possible, it avoids constant pool entries.
  // Darwin targets don't support movt with Reloc::Static, see
  // ARMTargetLowering::LowerGlobalAddressDarwin. Other targets only support
  // static movt relocations.
  if (Subtarget->useMovt() &&
      Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) {
    unsigned Opc;
    switch (RelocM) {
      case Reloc::PIC_:
        Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
        break;
      case Reloc::DynamicNoPIC:
        Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn;
        break;
      default:
        Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
        break;
    }
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg).addGlobalAddress(GV));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = TD.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = TD.getTypeAllocSize(GV->getType());
    }

    // Grab index.
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
      (Subtarget->isThumb() ? 4 : 8);
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
        .addConstantPoolIndex(Idx);
      if (RelocM == Reloc::PIC_)
        MIB.addImm(Id);
    } else {
      // The extra immediate is for addrmode2.
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                    DestReg)
        .addConstantPoolIndex(Idx)
        .addImm(0);
    }
    AddOptionalDefs(MIB);
  }

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
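  // For example, i32 passes this check (it maps directly onto a GPR), while
  // i1/i8/i16 do not; those are instead admitted by isLoadTypeLegal below.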
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
      break;
    case Instruction::BitCast: {
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    }
    case Instruction::IntToPtr: {
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::PtrToInt: {
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (isa<AddOperator>(Op) &&
                (!isa<Instruction>(Op) ||
                 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
                 == FuncInfo.MBB) &&
                isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add (in the same block) with a constant operand. Fold the
              // constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
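      // (Addr.Offset was committed just above, so restoring SavedAddr also
      // discards the folded GEP offsets.)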
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ?
      (const TargetRegisterClass*)&ARM::tGPRRegClass :
      (const TargetRegisterClass*)&ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        Flags,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
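    // The AM3 immediate folds the offset's sign into bit 8: the
    // (0x100 | -Addr.Offset) form below marks a subtracted offset, while a
    // plain positive offset is added.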
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = &ARM::GPRRegClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned loads need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load. Now we must move from the GPR to the FP register.
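  // (VMOVSR is a plain bit-for-bit move, not an int-to-float conversion, so
  // the word loaded into the GPR arrives in the S register unchanged.)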
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ?
        (const TargetRegisterClass*)&ARM::tGPRRegClass :
        (const TargetRegisterClass*)&ARM::GPRRegClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::VMOVRS), MoveReg)
                        .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      } else {
        StrOpc = ARM::VSTRS;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned stores need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ?
        ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
                  .addReg(AddrReg));
  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcVT = TLI.getValueType(Ty, true);
  if (!SrcVT.isSimple()) return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
      // than a cmn, because there is no way to represent 2147483648 as a
      // signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
    // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  if (!UseImm) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(CmpOpc))
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
      .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ?
    (const TargetRegisterClass*)&ARM::rGPRRegClass :
    (const TargetRegisterClass*)&ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(ARM::CPSR);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::DPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::SPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    EVT DestVT = MVT::i32;
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT,
                           /*isZExt*/!isSigned);
    if (SrcReg == 0) return false;
  }

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy()) Opc = isSigned ?
    ARM::VSITOD : ARM::VUITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  if (!UseImm) {
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    if (!isNegativeImm) {
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    } else {
      MovCCOpc = isThumb2 ?
bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
                        (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  if (!UseImm) {
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    if (!isNegativeImm) {
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    } else {
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
    }
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}
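
// Binary integer ops only reach this point for the non-legal types
// (i1/i8/i16) that the target-independent selector rejects. Since the high
// bits of such values are undefined anyway, the operation is simply performed
// on full 32-bit registers; e.g. (illustrative) "%r = add i8 %a, %b" becomes
// a plain "add rR, rA, rB" with no narrowing.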
bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
  EVT DestVT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we have a binary operation on a non-legal
  // type and the target independent selector doesn't know how to handle it.
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned Opc;
  switch (ISDOpcode) {
    default: return false;
    case ISD::ADD:
      Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
      break;
    case ISD::OR:
      Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
      break;
    case ISD::SUB:
      Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
      break;
  }

  unsigned SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  // TODO: Often the 2nd operand is an immediate, which can be encoded directly
  // in the instruction, rather than materializing the value in a register.
  unsigned SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(SrcReg1).addReg(SrcReg2));
  UpdateValueMap(I, ResultReg);
  return true;
}
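
// FP binary ops use the scalar VFP add/sub/mul instructions, picking the S-
// or D-register form by type. E.g. (register numbers illustrative)
// "%r = fadd double %a, %b" becomes "vadd.f64 d2, d0, d1".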
bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
                                           bool Return,
                                           bool isVarArg) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    if (!isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    // Fall through to the soft-float variant; variadic functions don't
    // use the hard floating point ABI.
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::GHC:
    if (Return)
      llvm_unreachable("Can't return in GHC call convention");
    else
      return CC_ARM_APCS_GHC;
  }
}
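
// ProcessCallArgs makes two passes over the assigned argument locations: a
// first pass that only checks that every location is one we know how to
// handle (so we never leave a half-emitted call behind), and a second pass
// that emits CALLSEQ_START, the copies into the parameter registers, and the
// stores into the outgoing stack slots. For example, an f64 argument assigned
// to a GPR pair is split with VMOVRRD, roughly (registers illustrative):
//   vmov r0, r1, d0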
bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes,
                                  bool isVarArg) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
                             CCAssignFnForCall(CC, false, isVarArg));

  // Check that we can handle all of the arguments. If we can't, then bail out
  // now before we add code to the MBB.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Check that this arg has been assigned a location we can handle.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      continue;
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64 ||
          // TODO: Only handle register args for now.
          !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
        return false;
    } else {
      switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) {
      default:
        return false;
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
        break;
      case MVT::f32:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      case MVT::f64:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      }
    }
  }

  // At this point, we are able to handle the call's arguments in fast isel.

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
           "We don't handle NEON/vector parameters yet.");

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
        assert(Arg != 0 && "Failed to emit a sext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::AExt:
        // Intentional fall-through. Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
        assert(Arg != 0 && "Failed to emit a zext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      assert(VA.getLocVT() == MVT::f64 &&
             "Custom lowering for v2f64 args not available");

      CCValAssign &NextVA = ArgLocs[++i];

      assert(VA.isRegLoc() && NextVA.isRegLoc() &&
             "We only handle register args!");

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr);
      (void)EmitRet;
      assert(EmitRet && "Could not emit a store for argument!");
    }
  }

  return true;
}
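
// FinishCall tears the call frame back down and copies the return value out
// of its physical location. An f64 returned in a GPR pair is reassembled into
// a D-register with VMOVDRR, roughly (registers illustrative):
//   vmov d0, r0, r1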
bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes, bool isVarArg) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the return, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
                   I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
                                                 F.isVarArg()));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVVT = TLI.getValueType(RV->getType());
    EVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      // Perform extension if flagged as either zext or sext. Otherwise, do
      // nothing.
      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
        if (SrcReg == 0) return false;
      }
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}
unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
  if (UseReg)
    return isThumb2 ? ARM::tBLXr : ARM::BLX;
  else
    return isThumb2 ? ARM::tBL : ARM::BL;
}

unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
  GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
                                       GlobalValue::ExternalLinkage, 0, Name);
  return ARMMaterializeGV(GV, TLI.getValueType(GV->getType()));
}

// A quick helper that emits a call to the named libcall, with the operands of
// the Instruction in I as the passed arguments. We can assume that we can
// emit a call for any libcall we can produce. This is an abridged version of
// the full call infrastructure since we won't need to worry about things like
// computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, false))
    return false;

  unsigned CalleeReg = 0;
  if (EnableARMLongCalls) {
    CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(CallOpc));
  if (isThumb2) {
    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
    if (EnableARMLongCalls)
      MIB.addReg(CalleeReg);
    else
      MIB.addExternalSymbol(TLI.getLibcallName(Call));
  } else {
    if (EnableARMLongCalls)
      MIB.addReg(CalleeReg);
    else
      MIB.addExternalSymbol(TLI.getLibcallName(Call));

    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
  }
  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
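
// SelectCall handles ordinary IR calls, bailing out (back to SelectionDAG)
// for anything it can't prove simple. As a rough sketch (registers and symbol
// illustrative), "%r = call i32 @foo(i32 %x)" becomes:
//   mov r0, rX       @ copy the argument into its parameter register
//   bl  foo
//   mov rR, r0       @ copy the result out of r0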
bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = 0) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  bool isVarArg = FTy->isVarArg();

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
      RetVT != MVT::i16 && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  unsigned arg_size = CS.arg_size();
  Args.reserve(arg_size);
  ArgRegs.reserve(arg_size);
  ArgVTs.reserve(arg_size);
  ArgFlags.reserve(arg_size);
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last two arguments, which shouldn't be passed to the underlying function.
    if (IntrMemName && e-i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, isVarArg))
    return false;

  bool UseReg = false;
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || EnableARMLongCalls) UseReg = true;

  unsigned CalleeReg = 0;
  if (UseReg) {
    if (IntrMemName)
      CalleeReg = getLibcallReg(IntrMemName);
    else
      CalleeReg = getRegForValue(Callee);

    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(UseReg);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(CallOpc));
  if (isThumb2) {
    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
    if (UseReg)
      MIB.addReg(CalleeReg);
    else if (!IntrMemName)
      MIB.addGlobalAddress(GV, 0, 0);
    else
      MIB.addExternalSymbol(IntrMemName, 0);
  } else {
    if (UseReg)
      MIB.addReg(CalleeReg);
    else if (!IntrMemName)
      MIB.addGlobalAddress(GV, 0, 0);
    else
      MIB.addExternalSymbol(IntrMemName, 0);

    // Explicitly adding the predicate here.
    AddDefaultPred(MIB);
  }

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
    return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
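
// Small constant-length memcpys are expanded inline instead of calling the
// library routine: the length is decomposed greedily into i32/i16/i8 chunks
// and each chunk is copied with a load/store pair. E.g. (illustrative) a
// 7-byte copy becomes an i32 copy, then an i16 copy at offset 4, then an i8
// copy at offset 6.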
bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!ARMIsMemCpySmall(Len))
    return false;

  // We don't care about alignment here since we just emit integer accesses.
  while (Len) {
    MVT VT;
    if (Len >= 4)
      VT = MVT::i32;
    else if (Len >= 2)
      VT = MVT::i16;
    else {
      assert(Len == 1);
      VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits()/8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}
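
// Only a handful of intrinsics are selected here: frameaddress is lowered to
// a chain of loads through the frame pointer, while memcpy/memmove/memset are
// either inlined (small constant memcpy) or routed through SelectCall with
// the matching libc name, so e.g. (illustrative)
// "call void @llvm.memset.p0i8.i32(i8* %p, i8 0, i32 64, ...)" ends up as a
// plain call to memset.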
bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
    MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
    MFI->setFrameAddressIsTaken(true);

    unsigned LdrOpc;
    const TargetRegisterClass *RC;
    if (isThumb2) {
      LdrOpc = ARM::t2LDRi12;
      RC = (const TargetRegisterClass*)&ARM::tGPRRegClass;
    } else {
      LdrOpc = ARM::LDRi12;
      RC = (const TargetRegisterClass*)&ARM::GPRRegClass;
    }

    const ARMBaseRegisterInfo *RegInfo =
      static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo());
    unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
    unsigned SrcReg = FramePtr;

    // Recursively load frame address
    // ldr r0, [fp]
    // ldr r0, [r0]
    // ldr r0, [r0]
    // ...
    unsigned DestReg;
    unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
    while (Depth--) {
      DestReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(LdrOpc), DestReg)
                      .addReg(SrcReg).addImm(0));
      SrcReg = DestReg;
    }
    UpdateValueMap(&I, SrcReg);
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpy's are common enough that we want to do them without a
      // call if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  case Intrinsic::trap: {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::TRAP));
    return true;
  }
  }
}
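
// The integer cast handling below relies on the convention that the high bits
// of a sub-word value are undefined: a trunc is free (the source register is
// reused as-is), and it is the extensions that emit code. E.g. (illustrative)
// "%r = zext i8 %x to i32" becomes "uxtb rR, rX", and a zext from i1 becomes
// "and rR, rX, #1".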
bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}

unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}
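
// Shifts in ARM mode are folded into the shifter operand of a MOV: MOVsi for
// constant shift amounts and MOVsr for variable ones. E.g. (illustrative)
// "%r = shl i32 %x, 3" becomes "lsl rR, rX, #3", which is a MOVsi with an
// lsl shifter operand.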
bool ARMFastISel::SelectShift(const Instruction *I,
                              ARM_AM::ShiftOpc ShiftTy) {
  // Thumb2 mode is handled by the target-independent selector or by
  // SelectionDAG ISel.
  if (isThumb2)
    return false;

  // Only handle i32 now.
  EVT DestVT = TLI.getValueType(I->getType(), true);
  if (DestVT != MVT::i32)
    return false;

  unsigned Opc = ARM::MOVsr;
  unsigned ShiftImm;
  Value *Src2Value = I->getOperand(1);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
    ShiftImm = CI->getZExtValue();

    // Fall back to selection DAG isel if the shift amount
    // is zero or greater than the width of the value type.
    if (ShiftImm == 0 || ShiftImm >= 32)
      return false;

    Opc = ARM::MOVsi;
  }

  Value *Src1Value = I->getOperand(0);
  unsigned Reg1 = getRegForValue(Src1Value);
  if (Reg1 == 0) return false;

  unsigned Reg2;
  if (Opc == ARM::MOVsr) {
    Reg2 = getRegForValue(Src2Value);
    if (Reg2 == 0) return false;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  if (ResultReg == 0) return false;

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg)
                            .addReg(Reg1);

  if (Opc == ARM::MOVsi)
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
  else if (Opc == ARM::MOVsr) {
    MIB.addReg(Reg2);
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
  }

  AddOptionalDefs(MIB);
  UpdateValueMap(I, ResultReg);
  return true;
}

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    case Instruction::Shl:
      return SelectShift(I, ARM_AM::lsl);
    case Instruction::LShr:
      return SelectShift(I, ARM_AM::lsr);
    case Instruction::AShr:
      return SelectShift(I, ARM_AM::asr);
    default: break;
  }
  return false;
}
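
// The fold below also handles the sign-extending case by switching to a
// sign-extending load: e.g. (illustrative) "ldrh r1, [r0]" followed by
// "sxth r2, r1" becomes a single "ldrsh r2, [r0]".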
/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                                const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  // ldrb r1, [r0]       ldrb r1, [r0]
  // uxtb r2, r1     =>
  // mov  r3, r2         mov  r3, r1
  bool isZExt = true;
  switch (MI->getOpcode()) {
    default: return false;
    case ARM::SXTH:
    case ARM::t2SXTH:
      isZExt = false;
      // Intentional fall-through.
    case ARM::UXTH:
    case ARM::t2UXTH:
      if (VT != MVT::i16)
        return false;
      break;
    case ARM::SXTB:
    case ARM::t2SXTB:
      isZExt = false;
      // Intentional fall-through.
    case ARM::UXTB:
    case ARM::t2UXTB:
      if (VT != MVT::i8)
        return false;
      break;
  }
  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}

namespace llvm {
  FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-iOS.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // iOS and non-Thumb1 only for now.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only())
      return new ARMFastISel(funcInfo);
    return 0;
  }
}