// ARMFastISel.cpp, revision 3574eca1b02600bac4e625297f4ecf745f4c4f32
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
      Base.Reg = 0;
    }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo)
    : FastISel(funcInfo, libInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
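    //
    // Editorial note (not in the upstream source): the FastEmitInst_*
    // overloads declared below mirror the generic emitters from the FastISel
    // base class so that every instruction built by this selector is routed
    // through AddOptionalDefs, which appends the default predicate and
    // optional CC operands that ARM instructions expect. A hypothetical use
    // from a selection routine, where LHSReg/RHSReg stand for previously
    // selected virtual registers, would look like:
    //
    //   unsigned ResultReg = FastEmitInst_rr(ARM::ADDrr, &ARM::GPRRegClass,
    //                                        LHSReg, /*Op0IsKill*/ false,
    //                                        RHSReg, /*Op1IsKill*/ false);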
  private:
    unsigned FastEmitInst_(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC);
    unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            unsigned Op0, bool Op0IsKill);
    unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             unsigned Op1, bool Op1IsKill);
    unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              unsigned Op2, bool Op2IsKill);
    unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             uint64_t Imm);
    unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             const ConstantFP *FPImm);
    unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              uint64_t Imm);
    unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            uint64_t Imm);
    unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             uint64_t Imm1, uint64_t Imm2);

    unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                        unsigned Op0, bool Op0IsKill,
                                        uint32_t Idx);

    // Backend specific FastISel code.
  private:
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                               const LoadInst *LI);
  private:
  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);
    bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(bool UseReg);
    unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, EVT VT);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                  bool Return,
                                  bool isVarArg);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool isVarArg);
    unsigned getLibcallReg(const Twine &Name);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes, bool isVarArg);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're a thumb2 or not NEON function we were handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine is predicable, go ahead and add the predicate operands; if
// it needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyway.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB,
                            FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                            DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
549 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 550 DestReg) 551 .addConstantPoolIndex(Idx) 552 .addReg(0)); 553 return DestReg; 554} 555 556unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) { 557 558 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1) 559 return false; 560 561 // If we can do this in a single instruction without a constant pool entry 562 // do so now. 563 const ConstantInt *CI = cast<ConstantInt>(C); 564 if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) { 565 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16; 566 unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 567 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 568 TII.get(Opc), ImmReg) 569 .addImm(CI->getZExtValue())); 570 return ImmReg; 571 } 572 573 // Use MVN to emit negative constants. 574 if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) { 575 unsigned Imm = (unsigned)~(CI->getSExtValue()); 576 bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 577 (ARM_AM::getSOImmVal(Imm) != -1); 578 if (UseImm) { 579 unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi; 580 unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 581 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 582 TII.get(Opc), ImmReg) 583 .addImm(Imm)); 584 return ImmReg; 585 } 586 } 587 588 // Load from constant pool. For now 32-bit only. 589 if (VT != MVT::i32) 590 return false; 591 592 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 593 594 // MachineConstantPool wants an explicit alignment. 595 unsigned Align = TD.getPrefTypeAlignment(C->getType()); 596 if (Align == 0) { 597 // TODO: Figure out if this is correct. 598 Align = TD.getTypeAllocSize(C->getType()); 599 } 600 unsigned Idx = MCP.getConstantPoolIndex(C, Align); 601 602 if (isThumb2) 603 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 604 TII.get(ARM::t2LDRpci), DestReg) 605 .addConstantPoolIndex(Idx)); 606 else 607 // The extra immediate is for addrmode2. 608 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 609 TII.get(ARM::LDRcp), DestReg) 610 .addConstantPoolIndex(Idx) 611 .addImm(0)); 612 613 return DestReg; 614} 615 616unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) { 617 // For now 32-bit only. 618 if (VT != MVT::i32) return 0; 619 620 Reloc::Model RelocM = TM.getRelocationModel(); 621 bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM); 622 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 623 624 // Use movw+movt when possible, it avoids constant pool entries. 625 // Darwin targets don't support movt with Reloc::Static, see 626 // ARMTargetLowering::LowerGlobalAddressDarwin. Other targets only support 627 // static movt relocations. 628 if (Subtarget->useMovt() && 629 Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) { 630 unsigned Opc; 631 switch (RelocM) { 632 case Reloc::PIC_: 633 Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel; 634 break; 635 case Reloc::DynamicNoPIC: 636 Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn; 637 break; 638 default: 639 Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm; 640 break; 641 } 642 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 643 DestReg).addGlobalAddress(GV)); 644 } else { 645 // MachineConstantPool wants an explicit alignment. 646 unsigned Align = TD.getPrefTypeAlignment(GV->getType()); 647 if (Align == 0) { 648 // TODO: Figure out if this is correct. 
649 Align = TD.getTypeAllocSize(GV->getType()); 650 } 651 652 if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_) 653 return ARMLowerPICELF(GV, Align, VT); 654 655 // Grab index. 656 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : 657 (Subtarget->isThumb() ? 4 : 8); 658 unsigned Id = AFI->createPICLabelUId(); 659 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id, 660 ARMCP::CPValue, 661 PCAdj); 662 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); 663 664 // Load value. 665 MachineInstrBuilder MIB; 666 if (isThumb2) { 667 unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic; 668 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg) 669 .addConstantPoolIndex(Idx); 670 if (RelocM == Reloc::PIC_) 671 MIB.addImm(Id); 672 AddOptionalDefs(MIB); 673 } else { 674 // The extra immediate is for addrmode2. 675 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp), 676 DestReg) 677 .addConstantPoolIndex(Idx) 678 .addImm(0); 679 AddOptionalDefs(MIB); 680 681 if (RelocM == Reloc::PIC_) { 682 unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD; 683 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); 684 685 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 686 DL, TII.get(Opc), NewDestReg) 687 .addReg(DestReg) 688 .addImm(Id); 689 AddOptionalDefs(MIB); 690 return NewDestReg; 691 } 692 } 693 } 694 695 if (IsIndirect) { 696 MachineInstrBuilder MIB; 697 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); 698 if (isThumb2) 699 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 700 TII.get(ARM::t2LDRi12), NewDestReg) 701 .addReg(DestReg) 702 .addImm(0); 703 else 704 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12), 705 NewDestReg) 706 .addReg(DestReg) 707 .addImm(0); 708 DestReg = NewDestReg; 709 AddOptionalDefs(MIB); 710 } 711 712 return DestReg; 713} 714 715unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) { 716 EVT VT = TLI.getValueType(C->getType(), true); 717 718 // Only handle simple types. 719 if (!VT.isSimple()) return 0; 720 721 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) 722 return ARMMaterializeFP(CFP, VT); 723 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) 724 return ARMMaterializeGV(GV, VT); 725 else if (isa<ConstantInt>(C)) 726 return ARMMaterializeInt(C, VT); 727 728 return 0; 729} 730 731// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF); 732 733unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) { 734 // Don't handle dynamic allocas. 735 if (!FuncInfo.StaticAllocaMap.count(AI)) return 0; 736 737 MVT VT; 738 if (!isLoadTypeLegal(AI->getType(), VT)) return 0; 739 740 DenseMap<const AllocaInst*, int>::iterator SI = 741 FuncInfo.StaticAllocaMap.find(AI); 742 743 // This will get lowered later into the correct offsets and registers 744 // via rewriteXFrameIndex. 745 if (SI != FuncInfo.StaticAllocaMap.end()) { 746 const TargetRegisterClass* RC = TLI.getRegClassFor(VT); 747 unsigned ResultReg = createResultReg(RC); 748 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri; 749 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 750 TII.get(Opc), ResultReg) 751 .addFrameIndex(SI->second) 752 .addImm(0)); 753 return ResultReg; 754 } 755 756 return 0; 757} 758 759bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) { 760 EVT evt = TLI.getValueType(Ty, true); 761 762 // Only handle simple types. 
763 if (evt == MVT::Other || !evt.isSimple()) return false; 764 VT = evt.getSimpleVT(); 765 766 // Handle all legal types, i.e. a register that will directly hold this 767 // value. 768 return TLI.isTypeLegal(VT); 769} 770 771bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) { 772 if (isTypeLegal(Ty, VT)) return true; 773 774 // If this is a type than can be sign or zero-extended to a basic operation 775 // go ahead and accept it now. 776 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16) 777 return true; 778 779 return false; 780} 781 782// Computes the address to get to an object. 783bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) { 784 // Some boilerplate from the X86 FastISel. 785 const User *U = NULL; 786 unsigned Opcode = Instruction::UserOp1; 787 if (const Instruction *I = dyn_cast<Instruction>(Obj)) { 788 // Don't walk into other basic blocks unless the object is an alloca from 789 // another block, otherwise it may not have a virtual register assigned. 790 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) || 791 FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) { 792 Opcode = I->getOpcode(); 793 U = I; 794 } 795 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) { 796 Opcode = C->getOpcode(); 797 U = C; 798 } 799 800 if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType())) 801 if (Ty->getAddressSpace() > 255) 802 // Fast instruction selection doesn't support the special 803 // address spaces. 804 return false; 805 806 switch (Opcode) { 807 default: 808 break; 809 case Instruction::BitCast: { 810 // Look through bitcasts. 811 return ARMComputeAddress(U->getOperand(0), Addr); 812 } 813 case Instruction::IntToPtr: { 814 // Look past no-op inttoptrs. 815 if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy()) 816 return ARMComputeAddress(U->getOperand(0), Addr); 817 break; 818 } 819 case Instruction::PtrToInt: { 820 // Look past no-op ptrtoints. 821 if (TLI.getValueType(U->getType()) == TLI.getPointerTy()) 822 return ARMComputeAddress(U->getOperand(0), Addr); 823 break; 824 } 825 case Instruction::GetElementPtr: { 826 Address SavedAddr = Addr; 827 int TmpOffset = Addr.Offset; 828 829 // Iterate through the GEP folding the constants into offsets where 830 // we can. 831 gep_type_iterator GTI = gep_type_begin(U); 832 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); 833 i != e; ++i, ++GTI) { 834 const Value *Op = *i; 835 if (StructType *STy = dyn_cast<StructType>(*GTI)) { 836 const StructLayout *SL = TD.getStructLayout(STy); 837 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue(); 838 TmpOffset += SL->getElementOffset(Idx); 839 } else { 840 uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType()); 841 for (;;) { 842 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) { 843 // Constant-offset addressing. 844 TmpOffset += CI->getSExtValue() * S; 845 break; 846 } 847 if (isa<AddOperator>(Op) && 848 (!isa<Instruction>(Op) || 849 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()] 850 == FuncInfo.MBB) && 851 isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) { 852 // An add (in the same block) with a constant operand. Fold the 853 // constant. 854 ConstantInt *CI = 855 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1)); 856 TmpOffset += CI->getSExtValue() * S; 857 // Iterate on the other operand. 858 Op = cast<AddOperator>(Op)->getOperand(0); 859 continue; 860 } 861 // Unsupported 862 goto unsupported_gep; 863 } 864 } 865 } 866 867 // Try to grab the base operand now. 
868 Addr.Offset = TmpOffset; 869 if (ARMComputeAddress(U->getOperand(0), Addr)) return true; 870 871 // We failed, restore everything and try the other options. 872 Addr = SavedAddr; 873 874 unsupported_gep: 875 break; 876 } 877 case Instruction::Alloca: { 878 const AllocaInst *AI = cast<AllocaInst>(Obj); 879 DenseMap<const AllocaInst*, int>::iterator SI = 880 FuncInfo.StaticAllocaMap.find(AI); 881 if (SI != FuncInfo.StaticAllocaMap.end()) { 882 Addr.BaseType = Address::FrameIndexBase; 883 Addr.Base.FI = SI->second; 884 return true; 885 } 886 break; 887 } 888 } 889 890 // Try to get this in a register if nothing else has worked. 891 if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj); 892 return Addr.Base.Reg != 0; 893} 894 895void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) { 896 897 assert(VT.isSimple() && "Non-simple types are invalid here!"); 898 899 bool needsLowering = false; 900 switch (VT.getSimpleVT().SimpleTy) { 901 default: llvm_unreachable("Unhandled load/store type!"); 902 case MVT::i1: 903 case MVT::i8: 904 case MVT::i16: 905 case MVT::i32: 906 if (!useAM3) { 907 // Integer loads/stores handle 12-bit offsets. 908 needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset); 909 // Handle negative offsets. 910 if (needsLowering && isThumb2) 911 needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 && 912 Addr.Offset > -256); 913 } else { 914 // ARM halfword load/stores and signed byte loads use +/-imm8 offsets. 915 needsLowering = (Addr.Offset > 255 || Addr.Offset < -255); 916 } 917 break; 918 case MVT::f32: 919 case MVT::f64: 920 // Floating point operands handle 8-bit offsets. 921 needsLowering = ((Addr.Offset & 0xff) != Addr.Offset); 922 break; 923 } 924 925 // If this is a stack pointer and the offset needs to be simplified then 926 // put the alloca address into a register, set the base type back to 927 // register and continue. This should almost never happen. 928 if (needsLowering && Addr.BaseType == Address::FrameIndexBase) { 929 const TargetRegisterClass *RC = isThumb2 ? 930 (const TargetRegisterClass*)&ARM::tGPRRegClass : 931 (const TargetRegisterClass*)&ARM::GPRRegClass; 932 unsigned ResultReg = createResultReg(RC); 933 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri; 934 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 935 TII.get(Opc), ResultReg) 936 .addFrameIndex(Addr.Base.FI) 937 .addImm(0)); 938 Addr.Base.Reg = ResultReg; 939 Addr.BaseType = Address::RegBase; 940 } 941 942 // Since the offset is too large for the load/store instruction 943 // get the reg+offset into a register. 944 if (needsLowering) { 945 Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg, 946 /*Op0IsKill*/false, Addr.Offset, MVT::i32); 947 Addr.Offset = 0; 948 } 949} 950 951void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr, 952 const MachineInstrBuilder &MIB, 953 unsigned Flags, bool useAM3) { 954 // addrmode5 output depends on the selection dag addressing dividing the 955 // offset by 4 that it then later multiplies. Do this here as well. 956 if (VT.getSimpleVT().SimpleTy == MVT::f32 || 957 VT.getSimpleVT().SimpleTy == MVT::f64) 958 Addr.Offset /= 4; 959 960 // Frame base works a bit differently. Handle it separately. 
961 if (Addr.BaseType == Address::FrameIndexBase) { 962 int FI = Addr.Base.FI; 963 int Offset = Addr.Offset; 964 MachineMemOperand *MMO = 965 FuncInfo.MF->getMachineMemOperand( 966 MachinePointerInfo::getFixedStack(FI, Offset), 967 Flags, 968 MFI.getObjectSize(FI), 969 MFI.getObjectAlignment(FI)); 970 // Now add the rest of the operands. 971 MIB.addFrameIndex(FI); 972 973 // ARM halfword load/stores and signed byte loads need an additional 974 // operand. 975 if (useAM3) { 976 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset; 977 MIB.addReg(0); 978 MIB.addImm(Imm); 979 } else { 980 MIB.addImm(Addr.Offset); 981 } 982 MIB.addMemOperand(MMO); 983 } else { 984 // Now add the rest of the operands. 985 MIB.addReg(Addr.Base.Reg); 986 987 // ARM halfword load/stores and signed byte loads need an additional 988 // operand. 989 if (useAM3) { 990 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset; 991 MIB.addReg(0); 992 MIB.addImm(Imm); 993 } else { 994 MIB.addImm(Addr.Offset); 995 } 996 } 997 AddOptionalDefs(MIB); 998} 999 1000bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, 1001 unsigned Alignment, bool isZExt, bool allocReg) { 1002 assert(VT.isSimple() && "Non-simple types are invalid here!"); 1003 unsigned Opc; 1004 bool useAM3 = false; 1005 bool needVMOV = false; 1006 const TargetRegisterClass *RC; 1007 switch (VT.getSimpleVT().SimpleTy) { 1008 // This is mostly going to be Neon/vector support. 1009 default: return false; 1010 case MVT::i1: 1011 case MVT::i8: 1012 if (isThumb2) { 1013 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1014 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8; 1015 else 1016 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12; 1017 } else { 1018 if (isZExt) { 1019 Opc = ARM::LDRBi12; 1020 } else { 1021 Opc = ARM::LDRSB; 1022 useAM3 = true; 1023 } 1024 } 1025 RC = &ARM::GPRRegClass; 1026 break; 1027 case MVT::i16: 1028 if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem()) 1029 return false; 1030 1031 if (isThumb2) { 1032 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1033 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8; 1034 else 1035 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12; 1036 } else { 1037 Opc = isZExt ? ARM::LDRH : ARM::LDRSH; 1038 useAM3 = true; 1039 } 1040 RC = &ARM::GPRRegClass; 1041 break; 1042 case MVT::i32: 1043 if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem()) 1044 return false; 1045 1046 if (isThumb2) { 1047 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1048 Opc = ARM::t2LDRi8; 1049 else 1050 Opc = ARM::t2LDRi12; 1051 } else { 1052 Opc = ARM::LDRi12; 1053 } 1054 RC = &ARM::GPRRegClass; 1055 break; 1056 case MVT::f32: 1057 if (!Subtarget->hasVFP2()) return false; 1058 // Unaligned loads need special handling. Floats require word-alignment. 1059 if (Alignment && Alignment < 4) { 1060 needVMOV = true; 1061 VT = MVT::i32; 1062 Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12; 1063 RC = &ARM::GPRRegClass; 1064 } else { 1065 Opc = ARM::VLDRS; 1066 RC = TLI.getRegClassFor(VT); 1067 } 1068 break; 1069 case MVT::f64: 1070 if (!Subtarget->hasVFP2()) return false; 1071 // FIXME: Unaligned loads need special handling. Doublewords require 1072 // word-alignment. 1073 if (Alignment && Alignment < 4) 1074 return false; 1075 1076 Opc = ARM::VLDRD; 1077 RC = TLI.getRegClassFor(VT); 1078 break; 1079 } 1080 // Simplify this down to something we can handle. 
1081 ARMSimplifyAddress(Addr, VT, useAM3); 1082 1083 // Create the base instruction, then add the operands. 1084 if (allocReg) 1085 ResultReg = createResultReg(RC); 1086 assert (ResultReg > 255 && "Expected an allocated virtual register."); 1087 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1088 TII.get(Opc), ResultReg); 1089 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3); 1090 1091 // If we had an unaligned load of a float we've converted it to an regular 1092 // load. Now we must move from the GRP to the FP register. 1093 if (needVMOV) { 1094 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32)); 1095 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1096 TII.get(ARM::VMOVSR), MoveReg) 1097 .addReg(ResultReg)); 1098 ResultReg = MoveReg; 1099 } 1100 return true; 1101} 1102 1103bool ARMFastISel::SelectLoad(const Instruction *I) { 1104 // Atomic loads need special handling. 1105 if (cast<LoadInst>(I)->isAtomic()) 1106 return false; 1107 1108 // Verify we have a legal type before going any further. 1109 MVT VT; 1110 if (!isLoadTypeLegal(I->getType(), VT)) 1111 return false; 1112 1113 // See if we can handle this address. 1114 Address Addr; 1115 if (!ARMComputeAddress(I->getOperand(0), Addr)) return false; 1116 1117 unsigned ResultReg; 1118 if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment())) 1119 return false; 1120 UpdateValueMap(I, ResultReg); 1121 return true; 1122} 1123 1124bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr, 1125 unsigned Alignment) { 1126 unsigned StrOpc; 1127 bool useAM3 = false; 1128 switch (VT.getSimpleVT().SimpleTy) { 1129 // This is mostly going to be Neon/vector support. 1130 default: return false; 1131 case MVT::i1: { 1132 unsigned Res = createResultReg(isThumb2 ? 1133 (const TargetRegisterClass*)&ARM::tGPRRegClass : 1134 (const TargetRegisterClass*)&ARM::GPRRegClass); 1135 unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri; 1136 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1137 TII.get(Opc), Res) 1138 .addReg(SrcReg).addImm(1)); 1139 SrcReg = Res; 1140 } // Fallthrough here. 1141 case MVT::i8: 1142 if (isThumb2) { 1143 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1144 StrOpc = ARM::t2STRBi8; 1145 else 1146 StrOpc = ARM::t2STRBi12; 1147 } else { 1148 StrOpc = ARM::STRBi12; 1149 } 1150 break; 1151 case MVT::i16: 1152 if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem()) 1153 return false; 1154 1155 if (isThumb2) { 1156 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1157 StrOpc = ARM::t2STRHi8; 1158 else 1159 StrOpc = ARM::t2STRHi12; 1160 } else { 1161 StrOpc = ARM::STRH; 1162 useAM3 = true; 1163 } 1164 break; 1165 case MVT::i32: 1166 if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem()) 1167 return false; 1168 1169 if (isThumb2) { 1170 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1171 StrOpc = ARM::t2STRi8; 1172 else 1173 StrOpc = ARM::t2STRi12; 1174 } else { 1175 StrOpc = ARM::STRi12; 1176 } 1177 break; 1178 case MVT::f32: 1179 if (!Subtarget->hasVFP2()) return false; 1180 // Unaligned stores need special handling. Floats require word-alignment. 
1181 if (Alignment && Alignment < 4) { 1182 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 1183 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1184 TII.get(ARM::VMOVRS), MoveReg) 1185 .addReg(SrcReg)); 1186 SrcReg = MoveReg; 1187 VT = MVT::i32; 1188 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12; 1189 } else { 1190 StrOpc = ARM::VSTRS; 1191 } 1192 break; 1193 case MVT::f64: 1194 if (!Subtarget->hasVFP2()) return false; 1195 // FIXME: Unaligned stores need special handling. Doublewords require 1196 // word-alignment. 1197 if (Alignment && Alignment < 4) 1198 return false; 1199 1200 StrOpc = ARM::VSTRD; 1201 break; 1202 } 1203 // Simplify this down to something we can handle. 1204 ARMSimplifyAddress(Addr, VT, useAM3); 1205 1206 // Create the base instruction, then add the operands. 1207 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1208 TII.get(StrOpc)) 1209 .addReg(SrcReg); 1210 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3); 1211 return true; 1212} 1213 1214bool ARMFastISel::SelectStore(const Instruction *I) { 1215 Value *Op0 = I->getOperand(0); 1216 unsigned SrcReg = 0; 1217 1218 // Atomic stores need special handling. 1219 if (cast<StoreInst>(I)->isAtomic()) 1220 return false; 1221 1222 // Verify we have a legal type before going any further. 1223 MVT VT; 1224 if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT)) 1225 return false; 1226 1227 // Get the value to be stored into a register. 1228 SrcReg = getRegForValue(Op0); 1229 if (SrcReg == 0) return false; 1230 1231 // See if we can handle this address. 1232 Address Addr; 1233 if (!ARMComputeAddress(I->getOperand(1), Addr)) 1234 return false; 1235 1236 if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment())) 1237 return false; 1238 return true; 1239} 1240 1241static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) { 1242 switch (Pred) { 1243 // Needs two compares... 1244 case CmpInst::FCMP_ONE: 1245 case CmpInst::FCMP_UEQ: 1246 default: 1247 // AL is our "false" for now. The other two need more compares. 1248 return ARMCC::AL; 1249 case CmpInst::ICMP_EQ: 1250 case CmpInst::FCMP_OEQ: 1251 return ARMCC::EQ; 1252 case CmpInst::ICMP_SGT: 1253 case CmpInst::FCMP_OGT: 1254 return ARMCC::GT; 1255 case CmpInst::ICMP_SGE: 1256 case CmpInst::FCMP_OGE: 1257 return ARMCC::GE; 1258 case CmpInst::ICMP_UGT: 1259 case CmpInst::FCMP_UGT: 1260 return ARMCC::HI; 1261 case CmpInst::FCMP_OLT: 1262 return ARMCC::MI; 1263 case CmpInst::ICMP_ULE: 1264 case CmpInst::FCMP_OLE: 1265 return ARMCC::LS; 1266 case CmpInst::FCMP_ORD: 1267 return ARMCC::VC; 1268 case CmpInst::FCMP_UNO: 1269 return ARMCC::VS; 1270 case CmpInst::FCMP_UGE: 1271 return ARMCC::PL; 1272 case CmpInst::ICMP_SLT: 1273 case CmpInst::FCMP_ULT: 1274 return ARMCC::LT; 1275 case CmpInst::ICMP_SLE: 1276 case CmpInst::FCMP_ULE: 1277 return ARMCC::LE; 1278 case CmpInst::FCMP_UNE: 1279 case CmpInst::ICMP_NE: 1280 return ARMCC::NE; 1281 case CmpInst::ICMP_UGE: 1282 return ARMCC::HS; 1283 case CmpInst::ICMP_ULT: 1284 return ARMCC::LO; 1285 } 1286} 1287 1288bool ARMFastISel::SelectBranch(const Instruction *I) { 1289 const BranchInst *BI = cast<BranchInst>(I); 1290 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)]; 1291 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)]; 1292 1293 // Simple branch support. 1294 1295 // If we can, avoid recomputing the compare - redoing it could lead to wonky 1296 // behavior. 
1297 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) { 1298 if (CI->hasOneUse() && (CI->getParent() == I->getParent())) { 1299 1300 // Get the compare predicate. 1301 // Try to take advantage of fallthrough opportunities. 1302 CmpInst::Predicate Predicate = CI->getPredicate(); 1303 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1304 std::swap(TBB, FBB); 1305 Predicate = CmpInst::getInversePredicate(Predicate); 1306 } 1307 1308 ARMCC::CondCodes ARMPred = getComparePred(Predicate); 1309 1310 // We may not handle every CC for now. 1311 if (ARMPred == ARMCC::AL) return false; 1312 1313 // Emit the compare. 1314 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1315 return false; 1316 1317 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; 1318 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) 1319 .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR); 1320 FastEmitBranch(FBB, DL); 1321 FuncInfo.MBB->addSuccessor(TBB); 1322 return true; 1323 } 1324 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) { 1325 MVT SourceVT; 1326 if (TI->hasOneUse() && TI->getParent() == I->getParent() && 1327 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) { 1328 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri; 1329 unsigned OpReg = getRegForValue(TI->getOperand(0)); 1330 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1331 TII.get(TstOpc)) 1332 .addReg(OpReg).addImm(1)); 1333 1334 unsigned CCMode = ARMCC::NE; 1335 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1336 std::swap(TBB, FBB); 1337 CCMode = ARMCC::EQ; 1338 } 1339 1340 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; 1341 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) 1342 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR); 1343 1344 FastEmitBranch(FBB, DL); 1345 FuncInfo.MBB->addSuccessor(TBB); 1346 return true; 1347 } 1348 } else if (const ConstantInt *CI = 1349 dyn_cast<ConstantInt>(BI->getCondition())) { 1350 uint64_t Imm = CI->getZExtValue(); 1351 MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB; 1352 FastEmitBranch(Target, DL); 1353 return true; 1354 } 1355 1356 unsigned CmpReg = getRegForValue(BI->getCondition()); 1357 if (CmpReg == 0) return false; 1358 1359 // We've been divorced from our compare! Our block was split, and 1360 // now our compare lives in a predecessor block. We musn't 1361 // re-compare here, as the children of the compare aren't guaranteed 1362 // live across the block boundary (we *could* check for this). 1363 // Regardless, the compare has been done in the predecessor block, 1364 // and it left a value for us in a virtual register. Ergo, we test 1365 // the one-bit value left in the virtual register. 1366 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri; 1367 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc)) 1368 .addReg(CmpReg).addImm(1)); 1369 1370 unsigned CCMode = ARMCC::NE; 1371 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1372 std::swap(TBB, FBB); 1373 CCMode = ARMCC::EQ; 1374 } 1375 1376 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; 1377 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) 1378 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR); 1379 FastEmitBranch(FBB, DL); 1380 FuncInfo.MBB->addSuccessor(TBB); 1381 return true; 1382} 1383 1384bool ARMFastISel::SelectIndirectBr(const Instruction *I) { 1385 unsigned AddrReg = getRegForValue(I->getOperand(0)); 1386 if (AddrReg == 0) return false; 1387 1388 unsigned Opc = isThumb2 ? 
ARM::tBRIND : ARM::BX; 1389 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc)) 1390 .addReg(AddrReg)); 1391 return true; 1392} 1393 1394bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, 1395 bool isZExt) { 1396 Type *Ty = Src1Value->getType(); 1397 EVT SrcVT = TLI.getValueType(Ty, true); 1398 if (!SrcVT.isSimple()) return false; 1399 1400 bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy()); 1401 if (isFloat && !Subtarget->hasVFP2()) 1402 return false; 1403 1404 // Check to see if the 2nd operand is a constant that we can encode directly 1405 // in the compare. 1406 int Imm = 0; 1407 bool UseImm = false; 1408 bool isNegativeImm = false; 1409 // FIXME: At -O0 we don't have anything that canonicalizes operand order. 1410 // Thus, Src1Value may be a ConstantInt, but we're missing it. 1411 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) { 1412 if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 || 1413 SrcVT == MVT::i1) { 1414 const APInt &CIVal = ConstInt->getValue(); 1415 Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue(); 1416 // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather 1417 // then a cmn, because there is no way to represent 2147483648 as a 1418 // signed 32-bit int. 1419 if (Imm < 0 && Imm != (int)0x80000000) { 1420 isNegativeImm = true; 1421 Imm = -Imm; 1422 } 1423 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 1424 (ARM_AM::getSOImmVal(Imm) != -1); 1425 } 1426 } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) { 1427 if (SrcVT == MVT::f32 || SrcVT == MVT::f64) 1428 if (ConstFP->isZero() && !ConstFP->isNegative()) 1429 UseImm = true; 1430 } 1431 1432 unsigned CmpOpc; 1433 bool isICmp = true; 1434 bool needsExt = false; 1435 switch (SrcVT.getSimpleVT().SimpleTy) { 1436 default: return false; 1437 // TODO: Verify compares. 1438 case MVT::f32: 1439 isICmp = false; 1440 CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES; 1441 break; 1442 case MVT::f64: 1443 isICmp = false; 1444 CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED; 1445 break; 1446 case MVT::i1: 1447 case MVT::i8: 1448 case MVT::i16: 1449 needsExt = true; 1450 // Intentional fall-through. 1451 case MVT::i32: 1452 if (isThumb2) { 1453 if (!UseImm) 1454 CmpOpc = ARM::t2CMPrr; 1455 else 1456 CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri; 1457 } else { 1458 if (!UseImm) 1459 CmpOpc = ARM::CMPrr; 1460 else 1461 CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri; 1462 } 1463 break; 1464 } 1465 1466 unsigned SrcReg1 = getRegForValue(Src1Value); 1467 if (SrcReg1 == 0) return false; 1468 1469 unsigned SrcReg2 = 0; 1470 if (!UseImm) { 1471 SrcReg2 = getRegForValue(Src2Value); 1472 if (SrcReg2 == 0) return false; 1473 } 1474 1475 // We have i1, i8, or i16, we need to either zero extend or sign extend. 1476 if (needsExt) { 1477 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt); 1478 if (SrcReg1 == 0) return false; 1479 if (!UseImm) { 1480 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt); 1481 if (SrcReg2 == 0) return false; 1482 } 1483 } 1484 1485 if (!UseImm) { 1486 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1487 TII.get(CmpOpc)) 1488 .addReg(SrcReg1).addReg(SrcReg2)); 1489 } else { 1490 MachineInstrBuilder MIB; 1491 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc)) 1492 .addReg(SrcReg1); 1493 1494 // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0. 
1495 if (isICmp) 1496 MIB.addImm(Imm); 1497 AddOptionalDefs(MIB); 1498 } 1499 1500 // For floating point we need to move the result to a comparison register 1501 // that we can then use for branches. 1502 if (Ty->isFloatTy() || Ty->isDoubleTy()) 1503 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1504 TII.get(ARM::FMSTAT))); 1505 return true; 1506} 1507 1508bool ARMFastISel::SelectCmp(const Instruction *I) { 1509 const CmpInst *CI = cast<CmpInst>(I); 1510 1511 // Get the compare predicate. 1512 ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate()); 1513 1514 // We may not handle every CC for now. 1515 if (ARMPred == ARMCC::AL) return false; 1516 1517 // Emit the compare. 1518 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1519 return false; 1520 1521 // Now set a register based on the comparison. Explicitly set the predicates 1522 // here. 1523 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1524 const TargetRegisterClass *RC = isThumb2 ? 1525 (const TargetRegisterClass*)&ARM::rGPRRegClass : 1526 (const TargetRegisterClass*)&ARM::GPRRegClass; 1527 unsigned DestReg = createResultReg(RC); 1528 Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0); 1529 unsigned ZeroReg = TargetMaterializeConstant(Zero); 1530 // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR. 1531 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg) 1532 .addReg(ZeroReg).addImm(1) 1533 .addImm(ARMPred).addReg(ARM::CPSR); 1534 1535 UpdateValueMap(I, DestReg); 1536 return true; 1537} 1538 1539bool ARMFastISel::SelectFPExt(const Instruction *I) { 1540 // Make sure we have VFP and that we're extending float to double. 1541 if (!Subtarget->hasVFP2()) return false; 1542 1543 Value *V = I->getOperand(0); 1544 if (!I->getType()->isDoubleTy() || 1545 !V->getType()->isFloatTy()) return false; 1546 1547 unsigned Op = getRegForValue(V); 1548 if (Op == 0) return false; 1549 1550 unsigned Result = createResultReg(&ARM::DPRRegClass); 1551 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1552 TII.get(ARM::VCVTDS), Result) 1553 .addReg(Op)); 1554 UpdateValueMap(I, Result); 1555 return true; 1556} 1557 1558bool ARMFastISel::SelectFPTrunc(const Instruction *I) { 1559 // Make sure we have VFP and that we're truncating double to float. 1560 if (!Subtarget->hasVFP2()) return false; 1561 1562 Value *V = I->getOperand(0); 1563 if (!(I->getType()->isFloatTy() && 1564 V->getType()->isDoubleTy())) return false; 1565 1566 unsigned Op = getRegForValue(V); 1567 if (Op == 0) return false; 1568 1569 unsigned Result = createResultReg(&ARM::SPRRegClass); 1570 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1571 TII.get(ARM::VCVTSD), Result) 1572 .addReg(Op)); 1573 UpdateValueMap(I, Result); 1574 return true; 1575} 1576 1577bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) { 1578 // Make sure we have VFP. 1579 if (!Subtarget->hasVFP2()) return false; 1580 1581 MVT DstVT; 1582 Type *Ty = I->getType(); 1583 if (!isTypeLegal(Ty, DstVT)) 1584 return false; 1585 1586 Value *Src = I->getOperand(0); 1587 EVT SrcVT = TLI.getValueType(Src->getType(), true); 1588 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 1589 return false; 1590 1591 unsigned SrcReg = getRegForValue(Src); 1592 if (SrcReg == 0) return false; 1593 1594 // Handle sign-extension. 
1595 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) { 1596 EVT DestVT = MVT::i32; 1597 SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, 1598 /*isZExt*/!isSigned); 1599 if (SrcReg == 0) return false; 1600 } 1601 1602 // The conversion routine works on fp-reg to fp-reg and the operand above 1603 // was an integer, move it to the fp registers if possible. 1604 unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg); 1605 if (FP == 0) return false; 1606 1607 unsigned Opc; 1608 if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS; 1609 else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD; 1610 else return false; 1611 1612 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT)); 1613 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 1614 ResultReg) 1615 .addReg(FP)); 1616 UpdateValueMap(I, ResultReg); 1617 return true; 1618} 1619 1620bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) { 1621 // Make sure we have VFP. 1622 if (!Subtarget->hasVFP2()) return false; 1623 1624 MVT DstVT; 1625 Type *RetTy = I->getType(); 1626 if (!isTypeLegal(RetTy, DstVT)) 1627 return false; 1628 1629 unsigned Op = getRegForValue(I->getOperand(0)); 1630 if (Op == 0) return false; 1631 1632 unsigned Opc; 1633 Type *OpTy = I->getOperand(0)->getType(); 1634 if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS; 1635 else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD; 1636 else return false; 1637 1638 // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg. 1639 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32)); 1640 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 1641 ResultReg) 1642 .addReg(Op)); 1643 1644 // This result needs to be in an integer register, but the conversion only 1645 // takes place in fp-regs. 1646 unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg); 1647 if (IntReg == 0) return false; 1648 1649 UpdateValueMap(I, IntReg); 1650 return true; 1651} 1652 1653bool ARMFastISel::SelectSelect(const Instruction *I) { 1654 MVT VT; 1655 if (!isTypeLegal(I->getType(), VT)) 1656 return false; 1657 1658 // Things need to be register sized for register moves. 1659 if (VT != MVT::i32) return false; 1660 const TargetRegisterClass *RC = TLI.getRegClassFor(VT); 1661 1662 unsigned CondReg = getRegForValue(I->getOperand(0)); 1663 if (CondReg == 0) return false; 1664 unsigned Op1Reg = getRegForValue(I->getOperand(1)); 1665 if (Op1Reg == 0) return false; 1666 1667 // Check to see if we can use an immediate in the conditional move. 1668 int Imm = 0; 1669 bool UseImm = false; 1670 bool isNegativeImm = false; 1671 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) { 1672 assert (VT == MVT::i32 && "Expecting an i32."); 1673 Imm = (int)ConstInt->getValue().getZExtValue(); 1674 if (Imm < 0) { 1675 isNegativeImm = true; 1676 Imm = ~Imm; 1677 } 1678 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 1679 (ARM_AM::getSOImmVal(Imm) != -1); 1680 } 1681 1682 unsigned Op2Reg = 0; 1683 if (!UseImm) { 1684 Op2Reg = getRegForValue(I->getOperand(2)); 1685 if (Op2Reg == 0) return false; 1686 } 1687 1688 unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri; 1689 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc)) 1690 .addReg(CondReg).addImm(0)); 1691 1692 unsigned MovCCOpc; 1693 if (!UseImm) { 1694 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr; 1695 } else { 1696 if (!isNegativeImm) { 1697 MovCCOpc = isThumb2 ? 
ARM::t2MOVCCi : ARM::MOVCCi; 1698 } else { 1699 MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi; 1700 } 1701 } 1702 unsigned ResultReg = createResultReg(RC); 1703 if (!UseImm) 1704 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg) 1705 .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR); 1706 else 1707 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg) 1708 .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR); 1709 UpdateValueMap(I, ResultReg); 1710 return true; 1711} 1712 1713bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) { 1714 MVT VT; 1715 Type *Ty = I->getType(); 1716 if (!isTypeLegal(Ty, VT)) 1717 return false; 1718 1719 // If we have integer div support we should have selected this automagically. 1720 // In case we have a real miss go ahead and return false and we'll pick 1721 // it up later. 1722 if (Subtarget->hasDivide()) return false; 1723 1724 // Otherwise emit a libcall. 1725 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 1726 if (VT == MVT::i8) 1727 LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8; 1728 else if (VT == MVT::i16) 1729 LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16; 1730 else if (VT == MVT::i32) 1731 LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32; 1732 else if (VT == MVT::i64) 1733 LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64; 1734 else if (VT == MVT::i128) 1735 LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128; 1736 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!"); 1737 1738 return ARMEmitLibcall(I, LC); 1739} 1740 1741bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) { 1742 MVT VT; 1743 Type *Ty = I->getType(); 1744 if (!isTypeLegal(Ty, VT)) 1745 return false; 1746 1747 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 1748 if (VT == MVT::i8) 1749 LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8; 1750 else if (VT == MVT::i16) 1751 LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16; 1752 else if (VT == MVT::i32) 1753 LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32; 1754 else if (VT == MVT::i64) 1755 LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64; 1756 else if (VT == MVT::i128) 1757 LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128; 1758 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!"); 1759 1760 return ARMEmitLibcall(I, LC); 1761} 1762 1763bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) { 1764 EVT DestVT = TLI.getValueType(I->getType(), true); 1765 1766 // We can get here in the case when we have a binary operation on a non-legal 1767 // type and the target independent selector doesn't know how to handle it. 1768 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 1769 return false; 1770 1771 unsigned Opc; 1772 switch (ISDOpcode) { 1773 default: return false; 1774 case ISD::ADD: 1775 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr; 1776 break; 1777 case ISD::OR: 1778 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr; 1779 break; 1780 case ISD::SUB: 1781 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr; 1782 break; 1783 } 1784 1785 unsigned SrcReg1 = getRegForValue(I->getOperand(0)); 1786 if (SrcReg1 == 0) return false; 1787 1788 // TODO: Often the 2nd operand is an immediate, which can be encoded directly 1789 // in the instruction, rather then materializing the value in a register. 
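  // (For example, 'add i8 %a, 7' currently materializes 7 into a register and
  // emits ADDrr/t2ADDrr; with immediate support it could emit ADDri/t2ADDri
  // directly. Purely illustrative of the TODO above.)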
1790 unsigned SrcReg2 = getRegForValue(I->getOperand(1)); 1791 if (SrcReg2 == 0) return false; 1792 1793 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 1794 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1795 TII.get(Opc), ResultReg) 1796 .addReg(SrcReg1).addReg(SrcReg2)); 1797 UpdateValueMap(I, ResultReg); 1798 return true; 1799} 1800 1801bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) { 1802 EVT VT = TLI.getValueType(I->getType(), true); 1803 1804 // We can get here in the case when we want to use NEON for our fp 1805 // operations, but can't figure out how to. Just use the vfp instructions 1806 // if we have them. 1807 // FIXME: It'd be nice to use NEON instructions. 1808 Type *Ty = I->getType(); 1809 bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy()); 1810 if (isFloat && !Subtarget->hasVFP2()) 1811 return false; 1812 1813 unsigned Opc; 1814 bool is64bit = VT == MVT::f64 || VT == MVT::i64; 1815 switch (ISDOpcode) { 1816 default: return false; 1817 case ISD::FADD: 1818 Opc = is64bit ? ARM::VADDD : ARM::VADDS; 1819 break; 1820 case ISD::FSUB: 1821 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS; 1822 break; 1823 case ISD::FMUL: 1824 Opc = is64bit ? ARM::VMULD : ARM::VMULS; 1825 break; 1826 } 1827 unsigned Op1 = getRegForValue(I->getOperand(0)); 1828 if (Op1 == 0) return false; 1829 1830 unsigned Op2 = getRegForValue(I->getOperand(1)); 1831 if (Op2 == 0) return false; 1832 1833 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT)); 1834 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1835 TII.get(Opc), ResultReg) 1836 .addReg(Op1).addReg(Op2)); 1837 UpdateValueMap(I, ResultReg); 1838 return true; 1839} 1840 1841// Call Handling Code 1842 1843// This is largely taken directly from CCAssignFnForNode 1844// TODO: We may not support all of this. 1845CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, 1846 bool Return, 1847 bool isVarArg) { 1848 switch (CC) { 1849 default: 1850 llvm_unreachable("Unsupported calling convention"); 1851 case CallingConv::Fast: 1852 if (Subtarget->hasVFP2() && !isVarArg) { 1853 if (!Subtarget->isAAPCS_ABI()) 1854 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); 1855 // For AAPCS ABI targets, just use VFP variant of the calling convention. 1856 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1857 } 1858 // Fallthrough 1859 case CallingConv::C: 1860 // Use target triple & subtarget features to do actual dispatch. 1861 if (Subtarget->isAAPCS_ABI()) { 1862 if (Subtarget->hasVFP2() && 1863 TM.Options.FloatABIType == FloatABI::Hard && !isVarArg) 1864 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1865 else 1866 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1867 } else 1868 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); 1869 case CallingConv::ARM_AAPCS_VFP: 1870 if (!isVarArg) 1871 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1872 // Fall through to soft float variant, variadic functions don't 1873 // use hard floating point ABI. 1874 case CallingConv::ARM_AAPCS: 1875 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1876 case CallingConv::ARM_APCS: 1877 return (Return ? 
RetCC_ARM_APCS: CC_ARM_APCS); 1878 case CallingConv::GHC: 1879 if (Return) 1880 llvm_unreachable("Can't return in GHC call convention"); 1881 else 1882 return CC_ARM_APCS_GHC; 1883 } 1884} 1885 1886bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args, 1887 SmallVectorImpl<unsigned> &ArgRegs, 1888 SmallVectorImpl<MVT> &ArgVTs, 1889 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags, 1890 SmallVectorImpl<unsigned> &RegArgs, 1891 CallingConv::ID CC, 1892 unsigned &NumBytes, 1893 bool isVarArg) { 1894 SmallVector<CCValAssign, 16> ArgLocs; 1895 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context); 1896 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, 1897 CCAssignFnForCall(CC, false, isVarArg)); 1898 1899 // Check that we can handle all of the arguments. If we can't, then bail out 1900 // now before we add code to the MBB. 1901 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1902 CCValAssign &VA = ArgLocs[i]; 1903 MVT ArgVT = ArgVTs[VA.getValNo()]; 1904 1905 // We don't handle NEON/vector parameters yet. 1906 if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64) 1907 return false; 1908 1909 // Now copy/store arg to correct locations. 1910 if (VA.isRegLoc() && !VA.needsCustom()) { 1911 continue; 1912 } else if (VA.needsCustom()) { 1913 // TODO: We need custom lowering for vector (v2f64) args. 1914 if (VA.getLocVT() != MVT::f64 || 1915 // TODO: Only handle register args for now. 1916 !VA.isRegLoc() || !ArgLocs[++i].isRegLoc()) 1917 return false; 1918 } else { 1919 switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) { 1920 default: 1921 return false; 1922 case MVT::i1: 1923 case MVT::i8: 1924 case MVT::i16: 1925 case MVT::i32: 1926 break; 1927 case MVT::f32: 1928 if (!Subtarget->hasVFP2()) 1929 return false; 1930 break; 1931 case MVT::f64: 1932 if (!Subtarget->hasVFP2()) 1933 return false; 1934 break; 1935 } 1936 } 1937 } 1938 1939 // At the point, we are able to handle the call's arguments in fast isel. 1940 1941 // Get a count of how many bytes are to be pushed on the stack. 1942 NumBytes = CCInfo.getNextStackOffset(); 1943 1944 // Issue CALLSEQ_START 1945 unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); 1946 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1947 TII.get(AdjStackDown)) 1948 .addImm(NumBytes)); 1949 1950 // Process the args. 1951 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1952 CCValAssign &VA = ArgLocs[i]; 1953 unsigned Arg = ArgRegs[VA.getValNo()]; 1954 MVT ArgVT = ArgVTs[VA.getValNo()]; 1955 1956 assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) && 1957 "We don't handle NEON/vector parameters yet."); 1958 1959 // Handle arg promotion, etc. 1960 switch (VA.getLocInfo()) { 1961 case CCValAssign::Full: break; 1962 case CCValAssign::SExt: { 1963 MVT DestVT = VA.getLocVT(); 1964 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false); 1965 assert (Arg != 0 && "Failed to emit a sext"); 1966 ArgVT = DestVT; 1967 break; 1968 } 1969 case CCValAssign::AExt: 1970 // Intentional fall-through. Handle AExt and ZExt. 
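      // (AExt leaves the high bits unspecified, so zero-extending below is a
      // legitimate way to satisfy it.)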
1971 case CCValAssign::ZExt: { 1972 MVT DestVT = VA.getLocVT(); 1973 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true); 1974 assert (Arg != 0 && "Failed to emit a sext"); 1975 ArgVT = DestVT; 1976 break; 1977 } 1978 case CCValAssign::BCvt: { 1979 unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg, 1980 /*TODO: Kill=*/false); 1981 assert(BC != 0 && "Failed to emit a bitcast!"); 1982 Arg = BC; 1983 ArgVT = VA.getLocVT(); 1984 break; 1985 } 1986 default: llvm_unreachable("Unknown arg promotion!"); 1987 } 1988 1989 // Now copy/store arg to correct locations. 1990 if (VA.isRegLoc() && !VA.needsCustom()) { 1991 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 1992 VA.getLocReg()) 1993 .addReg(Arg); 1994 RegArgs.push_back(VA.getLocReg()); 1995 } else if (VA.needsCustom()) { 1996 // TODO: We need custom lowering for vector (v2f64) args. 1997 assert(VA.getLocVT() == MVT::f64 && 1998 "Custom lowering for v2f64 args not available"); 1999 2000 CCValAssign &NextVA = ArgLocs[++i]; 2001 2002 assert(VA.isRegLoc() && NextVA.isRegLoc() && 2003 "We only handle register args!"); 2004 2005 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2006 TII.get(ARM::VMOVRRD), VA.getLocReg()) 2007 .addReg(NextVA.getLocReg(), RegState::Define) 2008 .addReg(Arg)); 2009 RegArgs.push_back(VA.getLocReg()); 2010 RegArgs.push_back(NextVA.getLocReg()); 2011 } else { 2012 assert(VA.isMemLoc()); 2013 // Need to store on the stack. 2014 Address Addr; 2015 Addr.BaseType = Address::RegBase; 2016 Addr.Base.Reg = ARM::SP; 2017 Addr.Offset = VA.getLocMemOffset(); 2018 2019 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet; 2020 assert(EmitRet && "Could not emit a store for argument!"); 2021 } 2022 } 2023 2024 return true; 2025} 2026 2027bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs, 2028 const Instruction *I, CallingConv::ID CC, 2029 unsigned &NumBytes, bool isVarArg) { 2030 // Issue CALLSEQ_END 2031 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); 2032 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2033 TII.get(AdjStackUp)) 2034 .addImm(NumBytes).addImm(0)); 2035 2036 // Now the return value. 2037 if (RetVT != MVT::isVoid) { 2038 SmallVector<CCValAssign, 16> RVLocs; 2039 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context); 2040 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2041 2042 // Copy all of the result registers out of their specified physreg. 2043 if (RVLocs.size() == 2 && RetVT == MVT::f64) { 2044 // For this move we copy into two registers and then move into the 2045 // double fp reg we want. 2046 EVT DestVT = RVLocs[0].getValVT(); 2047 const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT); 2048 unsigned ResultReg = createResultReg(DstRC); 2049 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2050 TII.get(ARM::VMOVDRR), ResultReg) 2051 .addReg(RVLocs[0].getLocReg()) 2052 .addReg(RVLocs[1].getLocReg())); 2053 2054 UsedRegs.push_back(RVLocs[0].getLocReg()); 2055 UsedRegs.push_back(RVLocs[1].getLocReg()); 2056 2057 // Finally update the result. 2058 UpdateValueMap(I, ResultReg); 2059 } else { 2060 assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!"); 2061 EVT CopyVT = RVLocs[0].getValVT(); 2062 2063 // Special handling for extended integers. 
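      // (Under the ARM calling conventions a sub-word integer result still
      // comes back in a full 32-bit register, so the copy is done at i32
      // width; e.g. a call returning i8 copies all of R0 into a fresh i32
      // vreg.)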
2064 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16) 2065 CopyVT = MVT::i32; 2066 2067 const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT); 2068 2069 unsigned ResultReg = createResultReg(DstRC); 2070 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 2071 ResultReg).addReg(RVLocs[0].getLocReg()); 2072 UsedRegs.push_back(RVLocs[0].getLocReg()); 2073 2074 // Finally update the result. 2075 UpdateValueMap(I, ResultReg); 2076 } 2077 } 2078 2079 return true; 2080} 2081 2082bool ARMFastISel::SelectRet(const Instruction *I) { 2083 const ReturnInst *Ret = cast<ReturnInst>(I); 2084 const Function &F = *I->getParent()->getParent(); 2085 2086 if (!FuncInfo.CanLowerReturn) 2087 return false; 2088 2089 CallingConv::ID CC = F.getCallingConv(); 2090 if (Ret->getNumOperands() > 0) { 2091 SmallVector<ISD::OutputArg, 4> Outs; 2092 GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(), 2093 Outs, TLI); 2094 2095 // Analyze operands of the call, assigning locations to each operand. 2096 SmallVector<CCValAssign, 16> ValLocs; 2097 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,I->getContext()); 2098 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */, 2099 F.isVarArg())); 2100 2101 const Value *RV = Ret->getOperand(0); 2102 unsigned Reg = getRegForValue(RV); 2103 if (Reg == 0) 2104 return false; 2105 2106 // Only handle a single return value for now. 2107 if (ValLocs.size() != 1) 2108 return false; 2109 2110 CCValAssign &VA = ValLocs[0]; 2111 2112 // Don't bother handling odd stuff for now. 2113 if (VA.getLocInfo() != CCValAssign::Full) 2114 return false; 2115 // Only handle register returns for now. 2116 if (!VA.isRegLoc()) 2117 return false; 2118 2119 unsigned SrcReg = Reg + VA.getValNo(); 2120 EVT RVVT = TLI.getValueType(RV->getType()); 2121 EVT DestVT = VA.getValVT(); 2122 // Special handling for extended integers. 2123 if (RVVT != DestVT) { 2124 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16) 2125 return false; 2126 2127 assert(DestVT == MVT::i32 && "ARM should always ext to i32"); 2128 2129 // Perform extension if flagged as either zext or sext. Otherwise, do 2130 // nothing. 2131 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) { 2132 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt()); 2133 if (SrcReg == 0) return false; 2134 } 2135 } 2136 2137 // Make the copy. 2138 unsigned DstReg = VA.getLocReg(); 2139 const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg); 2140 // Avoid a cross-class copy. This is very unlikely. 2141 if (!SrcRC->contains(DstReg)) 2142 return false; 2143 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 2144 DstReg).addReg(SrcReg); 2145 2146 // Mark the register as live out of the function. 2147 MRI.addLiveOut(VA.getLocReg()); 2148 } 2149 2150 unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET; 2151 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2152 TII.get(RetOpc))); 2153 return true; 2154} 2155 2156unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) { 2157 if (UseReg) 2158 return isThumb2 ? ARM::tBLXr : ARM::BLX; 2159 else 2160 return isThumb2 ? 
ARM::tBL : ARM::BL; 2161} 2162 2163unsigned ARMFastISel::getLibcallReg(const Twine &Name) { 2164 GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false, 2165 GlobalValue::ExternalLinkage, 0, Name); 2166 return ARMMaterializeGV(GV, TLI.getValueType(GV->getType())); 2167} 2168 2169// A quick function that will emit a call for a named libcall in F with the 2170// vector of passed arguments for the Instruction in I. We can assume that we 2171// can emit a call for any libcall we can produce. This is an abridged version 2172// of the full call infrastructure since we won't need to worry about things 2173// like computed function pointers or strange arguments at call sites. 2174// TODO: Try to unify this and the normal call bits for ARM, then try to unify 2175// with X86. 2176bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { 2177 CallingConv::ID CC = TLI.getLibcallCallingConv(Call); 2178 2179 // Handle *simple* calls for now. 2180 Type *RetTy = I->getType(); 2181 MVT RetVT; 2182 if (RetTy->isVoidTy()) 2183 RetVT = MVT::isVoid; 2184 else if (!isTypeLegal(RetTy, RetVT)) 2185 return false; 2186 2187 // Can't handle non-double multi-reg retvals. 2188 if (RetVT != MVT::isVoid && RetVT != MVT::i32) { 2189 SmallVector<CCValAssign, 16> RVLocs; 2190 CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context); 2191 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false)); 2192 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2193 return false; 2194 } 2195 2196 // Set up the argument vectors. 2197 SmallVector<Value*, 8> Args; 2198 SmallVector<unsigned, 8> ArgRegs; 2199 SmallVector<MVT, 8> ArgVTs; 2200 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2201 Args.reserve(I->getNumOperands()); 2202 ArgRegs.reserve(I->getNumOperands()); 2203 ArgVTs.reserve(I->getNumOperands()); 2204 ArgFlags.reserve(I->getNumOperands()); 2205 for (unsigned i = 0; i < I->getNumOperands(); ++i) { 2206 Value *Op = I->getOperand(i); 2207 unsigned Arg = getRegForValue(Op); 2208 if (Arg == 0) return false; 2209 2210 Type *ArgTy = Op->getType(); 2211 MVT ArgVT; 2212 if (!isTypeLegal(ArgTy, ArgVT)) return false; 2213 2214 ISD::ArgFlagsTy Flags; 2215 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy); 2216 Flags.setOrigAlign(OriginalAlignment); 2217 2218 Args.push_back(Op); 2219 ArgRegs.push_back(Arg); 2220 ArgVTs.push_back(ArgVT); 2221 ArgFlags.push_back(Flags); 2222 } 2223 2224 // Handle the arguments now that we've gotten them. 2225 SmallVector<unsigned, 4> RegArgs; 2226 unsigned NumBytes; 2227 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2228 RegArgs, CC, NumBytes, false)) 2229 return false; 2230 2231 unsigned CalleeReg = 0; 2232 if (EnableARMLongCalls) { 2233 CalleeReg = getLibcallReg(TLI.getLibcallName(Call)); 2234 if (CalleeReg == 0) return false; 2235 } 2236 2237 // Issue the call. 2238 unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls); 2239 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2240 DL, TII.get(CallOpc)); 2241 // BL / BLX don't take a predicate, but tBL / tBLX do. 2242 if (isThumb2) 2243 AddDefaultPred(MIB); 2244 if (EnableARMLongCalls) 2245 MIB.addReg(CalleeReg); 2246 else 2247 MIB.addExternalSymbol(TLI.getLibcallName(Call)); 2248 2249 // Add implicit physical register uses to the call. 2250 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2251 MIB.addReg(RegArgs[i], RegState::Implicit); 2252 2253 // Add a register mask with the call-preserved registers. 
2254 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2255 MIB.addRegMask(TRI.getCallPreservedMask(CC)); 2256 2257 // Finish off the call including any return values. 2258 SmallVector<unsigned, 4> UsedRegs; 2259 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false; 2260 2261 // Set all unused physreg defs as dead. 2262 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2263 2264 return true; 2265} 2266 2267bool ARMFastISel::SelectCall(const Instruction *I, 2268 const char *IntrMemName = 0) { 2269 const CallInst *CI = cast<CallInst>(I); 2270 const Value *Callee = CI->getCalledValue(); 2271 2272 // Can't handle inline asm. 2273 if (isa<InlineAsm>(Callee)) return false; 2274 2275 // Check the calling convention. 2276 ImmutableCallSite CS(CI); 2277 CallingConv::ID CC = CS.getCallingConv(); 2278 2279 // TODO: Avoid some calling conventions? 2280 2281 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType()); 2282 FunctionType *FTy = cast<FunctionType>(PT->getElementType()); 2283 bool isVarArg = FTy->isVarArg(); 2284 2285 // Handle *simple* calls for now. 2286 Type *RetTy = I->getType(); 2287 MVT RetVT; 2288 if (RetTy->isVoidTy()) 2289 RetVT = MVT::isVoid; 2290 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 && 2291 RetVT != MVT::i8 && RetVT != MVT::i1) 2292 return false; 2293 2294 // Can't handle non-double multi-reg retvals. 2295 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 && 2296 RetVT != MVT::i16 && RetVT != MVT::i32) { 2297 SmallVector<CCValAssign, 16> RVLocs; 2298 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context); 2299 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg)); 2300 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2301 return false; 2302 } 2303 2304 // Set up the argument vectors. 2305 SmallVector<Value*, 8> Args; 2306 SmallVector<unsigned, 8> ArgRegs; 2307 SmallVector<MVT, 8> ArgVTs; 2308 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2309 unsigned arg_size = CS.arg_size(); 2310 Args.reserve(arg_size); 2311 ArgRegs.reserve(arg_size); 2312 ArgVTs.reserve(arg_size); 2313 ArgFlags.reserve(arg_size); 2314 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 2315 i != e; ++i) { 2316 // If we're lowering a memory intrinsic instead of a regular call, skip the 2317 // last two arguments, which shouldn't be passed to the underlying function. 2318 if (IntrMemName && e-i <= 2) 2319 break; 2320 2321 ISD::ArgFlagsTy Flags; 2322 unsigned AttrInd = i - CS.arg_begin() + 1; 2323 if (CS.paramHasSExtAttr(AttrInd)) 2324 Flags.setSExt(); 2325 if (CS.paramHasZExtAttr(AttrInd)) 2326 Flags.setZExt(); 2327 2328 // FIXME: Only handle *easy* calls for now. 2329 if (CS.paramHasInRegAttr(AttrInd) || 2330 CS.paramHasStructRetAttr(AttrInd) || 2331 CS.paramHasNestAttr(AttrInd) || 2332 CS.paramHasByValAttr(AttrInd)) 2333 return false; 2334 2335 Type *ArgTy = (*i)->getType(); 2336 MVT ArgVT; 2337 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 && 2338 ArgVT != MVT::i1) 2339 return false; 2340 2341 unsigned Arg = getRegForValue(*i); 2342 if (Arg == 0) 2343 return false; 2344 2345 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy); 2346 Flags.setOrigAlign(OriginalAlignment); 2347 2348 Args.push_back(*i); 2349 ArgRegs.push_back(Arg); 2350 ArgVTs.push_back(ArgVT); 2351 ArgFlags.push_back(Flags); 2352 } 2353 2354 // Handle the arguments now that we've gotten them. 
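  // (ProcessCallArgs checks that every argument is handleable before emitting
  // anything, then issues CALLSEQ_START and the register copies / stack
  // stores; NumBytes comes back as the size of the outgoing argument area.)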
2355 SmallVector<unsigned, 4> RegArgs; 2356 unsigned NumBytes; 2357 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, 2358 RegArgs, CC, NumBytes, isVarArg)) 2359 return false; 2360 2361 bool UseReg = false; 2362 const GlobalValue *GV = dyn_cast<GlobalValue>(Callee); 2363 if (!GV || EnableARMLongCalls) UseReg = true; 2364 2365 unsigned CalleeReg = 0; 2366 if (UseReg) { 2367 if (IntrMemName) 2368 CalleeReg = getLibcallReg(IntrMemName); 2369 else 2370 CalleeReg = getRegForValue(Callee); 2371 2372 if (CalleeReg == 0) return false; 2373 } 2374 2375 // Issue the call. 2376 unsigned CallOpc = ARMSelectCallOp(UseReg); 2377 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2378 DL, TII.get(CallOpc)); 2379 2380 // ARM calls don't take a predicate, but tBL / tBLX do. 2381 if(isThumb2) 2382 AddDefaultPred(MIB); 2383 if (UseReg) 2384 MIB.addReg(CalleeReg); 2385 else if (!IntrMemName) 2386 MIB.addGlobalAddress(GV, 0, 0); 2387 else 2388 MIB.addExternalSymbol(IntrMemName, 0); 2389 2390 // Add implicit physical register uses to the call. 2391 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2392 MIB.addReg(RegArgs[i], RegState::Implicit); 2393 2394 // Add a register mask with the call-preserved registers. 2395 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2396 MIB.addRegMask(TRI.getCallPreservedMask(CC)); 2397 2398 // Finish off the call including any return values. 2399 SmallVector<unsigned, 4> UsedRegs; 2400 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg)) 2401 return false; 2402 2403 // Set all unused physreg defs as dead. 2404 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2405 2406 return true; 2407} 2408 2409bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) { 2410 return Len <= 16; 2411} 2412 2413bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, 2414 uint64_t Len) { 2415 // Make sure we don't bloat code by inlining very large memcpy's. 2416 if (!ARMIsMemCpySmall(Len)) 2417 return false; 2418 2419 // We don't care about alignment here since we just emit integer accesses. 2420 while (Len) { 2421 MVT VT; 2422 if (Len >= 4) 2423 VT = MVT::i32; 2424 else if (Len >= 2) 2425 VT = MVT::i16; 2426 else { 2427 assert(Len == 1); 2428 VT = MVT::i8; 2429 } 2430 2431 bool RV; 2432 unsigned ResultReg; 2433 RV = ARMEmitLoad(VT, ResultReg, Src); 2434 assert (RV == true && "Should be able to handle this load."); 2435 RV = ARMEmitStore(VT, ResultReg, Dest); 2436 assert (RV == true && "Should be able to handle this store."); 2437 (void)RV; 2438 2439 unsigned Size = VT.getSizeInBits()/8; 2440 Len -= Size; 2441 Dest.Offset += Size; 2442 Src.Offset += Size; 2443 } 2444 2445 return true; 2446} 2447 2448bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) { 2449 // FIXME: Handle more intrinsics. 
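  // Currently handled: frameaddress, memcpy, memmove, memset and trap. The
  // mem* intrinsics are either inlined (small constant-length memcpy) or
  // lowered through SelectCall with the corresponding libcall name.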
2450 switch (I.getIntrinsicID()) { 2451 default: return false; 2452 case Intrinsic::frameaddress: { 2453 MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo(); 2454 MFI->setFrameAddressIsTaken(true); 2455 2456 unsigned LdrOpc; 2457 const TargetRegisterClass *RC; 2458 if (isThumb2) { 2459 LdrOpc = ARM::t2LDRi12; 2460 RC = (const TargetRegisterClass*)&ARM::tGPRRegClass; 2461 } else { 2462 LdrOpc = ARM::LDRi12; 2463 RC = (const TargetRegisterClass*)&ARM::GPRRegClass; 2464 } 2465 2466 const ARMBaseRegisterInfo *RegInfo = 2467 static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo()); 2468 unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF)); 2469 unsigned SrcReg = FramePtr; 2470 2471 // Recursively load frame address 2472 // ldr r0 [fp] 2473 // ldr r0 [r0] 2474 // ldr r0 [r0] 2475 // ... 2476 unsigned DestReg; 2477 unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue(); 2478 while (Depth--) { 2479 DestReg = createResultReg(RC); 2480 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2481 TII.get(LdrOpc), DestReg) 2482 .addReg(SrcReg).addImm(0)); 2483 SrcReg = DestReg; 2484 } 2485 UpdateValueMap(&I, SrcReg); 2486 return true; 2487 } 2488 case Intrinsic::memcpy: 2489 case Intrinsic::memmove: { 2490 const MemTransferInst &MTI = cast<MemTransferInst>(I); 2491 // Don't handle volatile. 2492 if (MTI.isVolatile()) 2493 return false; 2494 2495 // Disable inlining for memmove before calls to ComputeAddress. Otherwise, 2496 // we would emit dead code because we don't currently handle memmoves. 2497 bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy); 2498 if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) { 2499 // Small memcpy's are common enough that we want to do them without a call 2500 // if possible. 2501 uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue(); 2502 if (ARMIsMemCpySmall(Len)) { 2503 Address Dest, Src; 2504 if (!ARMComputeAddress(MTI.getRawDest(), Dest) || 2505 !ARMComputeAddress(MTI.getRawSource(), Src)) 2506 return false; 2507 if (ARMTryEmitSmallMemCpy(Dest, Src, Len)) 2508 return true; 2509 } 2510 } 2511 2512 if (!MTI.getLength()->getType()->isIntegerTy(32)) 2513 return false; 2514 2515 if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255) 2516 return false; 2517 2518 const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove"; 2519 return SelectCall(&I, IntrMemName); 2520 } 2521 case Intrinsic::memset: { 2522 const MemSetInst &MSI = cast<MemSetInst>(I); 2523 // Don't handle volatile. 2524 if (MSI.isVolatile()) 2525 return false; 2526 2527 if (!MSI.getLength()->getType()->isIntegerTy(32)) 2528 return false; 2529 2530 if (MSI.getDestAddressSpace() > 255) 2531 return false; 2532 2533 return SelectCall(&I, "memset"); 2534 } 2535 case Intrinsic::trap: { 2536 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::TRAP)); 2537 return true; 2538 } 2539 } 2540} 2541 2542bool ARMFastISel::SelectTrunc(const Instruction *I) { 2543 // The high bits for a type smaller than the register size are assumed to be 2544 // undefined. 
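  // For example, 'trunc i32 %x to i8' emits no code at all: the register
  // already holding %x is reused for the result, and users must not rely on
  // bits 8-31 being zero.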
2545 Value *Op = I->getOperand(0); 2546 2547 EVT SrcVT, DestVT; 2548 SrcVT = TLI.getValueType(Op->getType(), true); 2549 DestVT = TLI.getValueType(I->getType(), true); 2550 2551 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 2552 return false; 2553 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 2554 return false; 2555 2556 unsigned SrcReg = getRegForValue(Op); 2557 if (!SrcReg) return false; 2558 2559 // Because the high bits are undefined, a truncate doesn't generate 2560 // any code. 2561 UpdateValueMap(I, SrcReg); 2562 return true; 2563} 2564 2565unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, 2566 bool isZExt) { 2567 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8) 2568 return 0; 2569 2570 unsigned Opc; 2571 bool isBoolZext = false; 2572 if (!SrcVT.isSimple()) return 0; 2573 switch (SrcVT.getSimpleVT().SimpleTy) { 2574 default: return 0; 2575 case MVT::i16: 2576 if (!Subtarget->hasV6Ops()) return 0; 2577 if (isZExt) 2578 Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH; 2579 else 2580 Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH; 2581 break; 2582 case MVT::i8: 2583 if (!Subtarget->hasV6Ops()) return 0; 2584 if (isZExt) 2585 Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB; 2586 else 2587 Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB; 2588 break; 2589 case MVT::i1: 2590 if (isZExt) { 2591 Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri; 2592 isBoolZext = true; 2593 break; 2594 } 2595 return 0; 2596 } 2597 2598 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 2599 MachineInstrBuilder MIB; 2600 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg) 2601 .addReg(SrcReg); 2602 if (isBoolZext) 2603 MIB.addImm(1); 2604 else 2605 MIB.addImm(0); 2606 AddOptionalDefs(MIB); 2607 return ResultReg; 2608} 2609 2610bool ARMFastISel::SelectIntExt(const Instruction *I) { 2611 // On ARM, in general, integer casts don't involve legal types; this code 2612 // handles promotable integers. 2613 Type *DestTy = I->getType(); 2614 Value *Src = I->getOperand(0); 2615 Type *SrcTy = Src->getType(); 2616 2617 EVT SrcVT, DestVT; 2618 SrcVT = TLI.getValueType(SrcTy, true); 2619 DestVT = TLI.getValueType(DestTy, true); 2620 2621 bool isZExt = isa<ZExtInst>(I); 2622 unsigned SrcReg = getRegForValue(Src); 2623 if (!SrcReg) return false; 2624 2625 unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt); 2626 if (ResultReg == 0) return false; 2627 UpdateValueMap(I, ResultReg); 2628 return true; 2629} 2630 2631bool ARMFastISel::SelectShift(const Instruction *I, 2632 ARM_AM::ShiftOpc ShiftTy) { 2633 // We handle thumb2 mode by target independent selector 2634 // or SelectionDAG ISel. 2635 if (isThumb2) 2636 return false; 2637 2638 // Only handle i32 now. 2639 EVT DestVT = TLI.getValueType(I->getType(), true); 2640 if (DestVT != MVT::i32) 2641 return false; 2642 2643 unsigned Opc = ARM::MOVsr; 2644 unsigned ShiftImm; 2645 Value *Src2Value = I->getOperand(1); 2646 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) { 2647 ShiftImm = CI->getZExtValue(); 2648 2649 // Fall back to selection DAG isel if the shift amount 2650 // is zero or greater than the width of the value type. 
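    // (With a valid constant amount this selects the shifted-register move,
    // e.g. 'shl i32 %x, 3' becomes roughly 'mov Rd, Rx, lsl #3' via MOVsi.)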
2651 if (ShiftImm == 0 || ShiftImm >=32) 2652 return false; 2653 2654 Opc = ARM::MOVsi; 2655 } 2656 2657 Value *Src1Value = I->getOperand(0); 2658 unsigned Reg1 = getRegForValue(Src1Value); 2659 if (Reg1 == 0) return false; 2660 2661 unsigned Reg2 = 0; 2662 if (Opc == ARM::MOVsr) { 2663 Reg2 = getRegForValue(Src2Value); 2664 if (Reg2 == 0) return false; 2665 } 2666 2667 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 2668 if(ResultReg == 0) return false; 2669 2670 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2671 TII.get(Opc), ResultReg) 2672 .addReg(Reg1); 2673 2674 if (Opc == ARM::MOVsi) 2675 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm)); 2676 else if (Opc == ARM::MOVsr) { 2677 MIB.addReg(Reg2); 2678 MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0)); 2679 } 2680 2681 AddOptionalDefs(MIB); 2682 UpdateValueMap(I, ResultReg); 2683 return true; 2684} 2685 2686// TODO: SoftFP support. 2687bool ARMFastISel::TargetSelectInstruction(const Instruction *I) { 2688 2689 switch (I->getOpcode()) { 2690 case Instruction::Load: 2691 return SelectLoad(I); 2692 case Instruction::Store: 2693 return SelectStore(I); 2694 case Instruction::Br: 2695 return SelectBranch(I); 2696 case Instruction::IndirectBr: 2697 return SelectIndirectBr(I); 2698 case Instruction::ICmp: 2699 case Instruction::FCmp: 2700 return SelectCmp(I); 2701 case Instruction::FPExt: 2702 return SelectFPExt(I); 2703 case Instruction::FPTrunc: 2704 return SelectFPTrunc(I); 2705 case Instruction::SIToFP: 2706 return SelectIToFP(I, /*isSigned*/ true); 2707 case Instruction::UIToFP: 2708 return SelectIToFP(I, /*isSigned*/ false); 2709 case Instruction::FPToSI: 2710 return SelectFPToI(I, /*isSigned*/ true); 2711 case Instruction::FPToUI: 2712 return SelectFPToI(I, /*isSigned*/ false); 2713 case Instruction::Add: 2714 return SelectBinaryIntOp(I, ISD::ADD); 2715 case Instruction::Or: 2716 return SelectBinaryIntOp(I, ISD::OR); 2717 case Instruction::Sub: 2718 return SelectBinaryIntOp(I, ISD::SUB); 2719 case Instruction::FAdd: 2720 return SelectBinaryFPOp(I, ISD::FADD); 2721 case Instruction::FSub: 2722 return SelectBinaryFPOp(I, ISD::FSUB); 2723 case Instruction::FMul: 2724 return SelectBinaryFPOp(I, ISD::FMUL); 2725 case Instruction::SDiv: 2726 return SelectDiv(I, /*isSigned*/ true); 2727 case Instruction::UDiv: 2728 return SelectDiv(I, /*isSigned*/ false); 2729 case Instruction::SRem: 2730 return SelectRem(I, /*isSigned*/ true); 2731 case Instruction::URem: 2732 return SelectRem(I, /*isSigned*/ false); 2733 case Instruction::Call: 2734 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 2735 return SelectIntrinsicCall(*II); 2736 return SelectCall(I); 2737 case Instruction::Select: 2738 return SelectSelect(I); 2739 case Instruction::Ret: 2740 return SelectRet(I); 2741 case Instruction::Trunc: 2742 return SelectTrunc(I); 2743 case Instruction::ZExt: 2744 case Instruction::SExt: 2745 return SelectIntExt(I); 2746 case Instruction::Shl: 2747 return SelectShift(I, ARM_AM::lsl); 2748 case Instruction::LShr: 2749 return SelectShift(I, ARM_AM::lsr); 2750 case Instruction::AShr: 2751 return SelectShift(I, ARM_AM::asr); 2752 default: break; 2753 } 2754 return false; 2755} 2756 2757/// TryToFoldLoad - The specified machine instr operand is a vreg, and that 2758/// vreg is being provided by the specified load instruction. If possible, 2759/// try to fold the load as an operand to the instruction, returning true if 2760/// successful. 
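/// Only zero- and sign-extends of i8 and i16 loads are folded at the moment;
/// anything else is left untouched.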
2761bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo, 2762 const LoadInst *LI) { 2763 // Verify we have a legal type before going any further. 2764 MVT VT; 2765 if (!isLoadTypeLegal(LI->getType(), VT)) 2766 return false; 2767 2768 // Combine load followed by zero- or sign-extend. 2769 // ldrb r1, [r0] ldrb r1, [r0] 2770 // uxtb r2, r1 => 2771 // mov r3, r2 mov r3, r1 2772 bool isZExt = true; 2773 switch(MI->getOpcode()) { 2774 default: return false; 2775 case ARM::SXTH: 2776 case ARM::t2SXTH: 2777 isZExt = false; 2778 case ARM::UXTH: 2779 case ARM::t2UXTH: 2780 if (VT != MVT::i16) 2781 return false; 2782 break; 2783 case ARM::SXTB: 2784 case ARM::t2SXTB: 2785 isZExt = false; 2786 case ARM::UXTB: 2787 case ARM::t2UXTB: 2788 if (VT != MVT::i8) 2789 return false; 2790 break; 2791 } 2792 // See if we can handle this address. 2793 Address Addr; 2794 if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false; 2795 2796 unsigned ResultReg = MI->getOperand(0).getReg(); 2797 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false)) 2798 return false; 2799 MI->eraseFromParent(); 2800 return true; 2801} 2802 2803unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, 2804 unsigned Align, EVT VT) { 2805 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 2806 ARMConstantPoolConstant *CPV = 2807 ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); 2808 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); 2809 2810 unsigned Opc; 2811 unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT)); 2812 // Load value. 2813 if (isThumb2) { 2814 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2815 TII.get(ARM::t2LDRpci), DestReg1) 2816 .addConstantPoolIndex(Idx)); 2817 Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs; 2818 } else { 2819 // The extra immediate is for addrmode2. 2820 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2821 DL, TII.get(ARM::LDRcp), DestReg1) 2822 .addConstantPoolIndex(Idx).addImm(0)); 2823 Opc = UseGOTOFF ? ARM::ADDrr : ARM::LDRrs; 2824 } 2825 2826 unsigned GlobalBaseReg = AFI->getGlobalBaseReg(); 2827 if (GlobalBaseReg == 0) { 2828 GlobalBaseReg = MRI.createVirtualRegister(TLI.getRegClassFor(VT)); 2829 AFI->setGlobalBaseReg(GlobalBaseReg); 2830 } 2831 2832 unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT)); 2833 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 2834 DL, TII.get(Opc), DestReg2) 2835 .addReg(DestReg1) 2836 .addReg(GlobalBaseReg); 2837 if (!UseGOTOFF) 2838 MIB.addImm(0); 2839 AddOptionalDefs(MIB); 2840 2841 return DestReg2; 2842} 2843 2844namespace llvm { 2845 FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo, 2846 const TargetLibraryInfo *libInfo) { 2847 // Completely untested on non-iOS. 2848 const TargetMachine &TM = funcInfo.MF->getTarget(); 2849 2850 // Darwin and thumb1 only for now. 2851 const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>(); 2852 if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only()) 2853 return new ARMFastISel(funcInfo, libInfo); 2854 return 0; 2855 } 2856} 2857