ARMFastISel.cpp revision 8b62abdd7b9c8fc5d78dad86093f4afdfeba949d
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/DataLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
      : BaseType(RegBase), Offset(0) {
      Base.Reg = 0;
    }
  } Address;
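
  // Editor's note (illustrative sketch, not part of the original source):
  // clients below typically fill an Address in two steps, e.g.
  //
  //   Address Addr;                                    // RegBase, Reg 0, Offset 0
  //   if (ARMComputeAddress(I->getOperand(0), Addr))   // fold GEPs/allocas in
  //     ARMSimplifyAddress(Addr, VT, /*useAM3*/false); // clamp the offset
  //
  // leaving either a frame index or a base register plus a small offset.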

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo)
    : FastISel(funcInfo, libInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
  private:
    unsigned FastEmitInst_(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC);
    unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            unsigned Op0, bool Op0IsKill);
    unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             unsigned Op1, bool Op1IsKill);
    unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              unsigned Op2, bool Op2IsKill);
    unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             uint64_t Imm);
    unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             const ConstantFP *FPImm);
    unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              uint64_t Imm);
    unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            uint64_t Imm);
    unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             uint64_t Imm1, uint64_t Imm2);

    unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                        unsigned Op0, bool Op0IsKill,
                                        uint32_t Idx);

    // Backend specific FastISel code.
  private:
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                               const LoadInst *LI);
  private:
  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);
    bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                               unsigned Alignment);
    unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
    unsigned ARMMaterializeInt(const Constant *C, MVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
    unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(bool UseReg);
    unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                  bool Return,
                                  bool isVarArg);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool isVarArg);
    unsigned getLibcallReg(const Twine &Name);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes, bool isVarArg);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(MVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're a thumb2 or not NEON function we were handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
      AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine is predicable go ahead and add the predicate operands, if
// it needs default CC operands add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyways.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}
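
// Editor's note (illustrative sketch of the resulting operand list, an
// assumption rather than original code): for a predicable instruction such
// as ARM::ADDri,
//
//   AddOptionalDefs(BuildMI(..., TII.get(ARM::ADDri), DestReg)
//                     .addReg(SrcReg).addImm(0));
//
// ends up equivalent to building
//
//   BuildMI(..., TII.get(ARM::ADDri), DestReg).addReg(SrcReg).addImm(0)
//       .addImm(ARMCC::AL).addReg(0)   // default predicate: always
//       .addReg(0);                    // unset optional cc_out (CCR)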

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return false;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
      &ARM::GPRRegClass;
    unsigned ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool.  For now 32-bit only.
  if (VT != MVT::i32)
    return false;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}
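
// Editor's note (illustrative examples, an assumption rather than original
// code) of the three i32 paths above:
//   0x00001234 -> MOVi16/t2MOVi16 (single movw, v6t2+)
//   0xffffff00 -> MVNi/t2MVNi     (~0xff fits the modified-immediate encoding)
//   0x12345678 -> LDRcp/t2LDRpci  (constant-pool load)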

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();
  bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM);
  const TargetRegisterClass *RC = isThumb2 ?
    (const TargetRegisterClass*)&ARM::rGPRRegClass :
    (const TargetRegisterClass*)&ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);

  // Use movw+movt when possible, it avoids constant pool entries.
  // Darwin targets don't support movt with Reloc::Static, see
  // ARMTargetLowering::LowerGlobalAddressDarwin.  Other targets only support
  // static movt relocations.
  if (Subtarget->useMovt() &&
      Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) {
    unsigned Opc;
    switch (RelocM) {
    case Reloc::PIC_:
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
      break;
    case Reloc::DynamicNoPIC:
      Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn;
      break;
    default:
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
      break;
    }
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg).addGlobalAddress(GV));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = TD.getPrefTypeAlignment(GV->getType());
    if (Align == 0) {
      // TODO: Figure out if this is correct.
      Align = TD.getTypeAllocSize(GV->getType());
    }

    if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_)
      return ARMLowerPICELF(GV, Align, VT);

    // Grab index.
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
      (Subtarget->isThumb() ? 4 : 8);
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
        .addConstantPoolIndex(Idx);
      if (RelocM == Reloc::PIC_)
        MIB.addImm(Id);
      AddOptionalDefs(MIB);
    } else {
      // The extra immediate is for addrmode2.
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                    DestReg)
        .addConstantPoolIndex(Idx)
        .addImm(0);
      AddOptionalDefs(MIB);

      if (RelocM == Reloc::PIC_) {
        unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
        unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));

        MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                          DL, TII.get(Opc), NewDestReg)
                                  .addReg(DestReg)
                                  .addImm(Id);
        AddOptionalDefs(MIB);
        return NewDestReg;
      }
    }
  }

  if (IsIndirect) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple()) return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    break;
    case Instruction::BitCast: {
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    }
    case Instruction::IntToPtr: {
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::PtrToInt: {
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (isa<AddOperator>(Op) &&
                (!isa<Instruction>(Op) ||
                 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
                   == FuncInfo.MBB) &&
                isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add (in the same block) with a constant operand. Fold the
              // constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
  bool needsLowering = false;
  switch (VT.SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ?
      (const TargetRegisterClass*)&ARM::tGPRRegClass :
      (const TargetRegisterClass*)&ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}
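
// Editor's note (illustrative examples of the lowering above, an assumption
// rather than original code) for an ARM-mode i32 access, where imm12 covers
// offsets 0..4095:
//   [r0, #4092] -> kept as-is
//   [r0, #8192] -> rX = ADD r0, #8192; access [rX, #0]
// f32/f64 accesses only pass offsets 0..255 here; AddLoadStoreOperands below
// divides the offset by 4 for the addrmode5 encoding.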

void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        Flags,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}
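
// Editor's note (illustrative examples, an assumption rather than original
// code) of the addrmode3 immediate built above, where bit 8 carries the sign:
//   Addr.Offset = +12  ->  Imm = 0x00c
//   Addr.Offset = -12  ->  Imm = 0x10c   (0x100 | 12)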

bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = &ARM::GPRRegClass;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = &ARM::GPRRegClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned loads need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert (ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load.  Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ?
        (const TargetRegisterClass*)&ARM::tGPRRegClass :
        (const TargetRegisterClass*)&ARM::GPRRegClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::VMOVRS), MoveReg)
                        .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      } else {
        StrOpc = ARM::VSTRS;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned stores need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}
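
// Editor's note (illustrative, an assumption rather than original code): the
// two predicates punted on above each need two branches after a VFP compare:
//   FCMP_ONE == (OGT || OLT) -> a GT branch plus an MI branch
//   FCMP_UEQ == (OEQ || UNO) -> an EQ branch plus a VS branch
// which is why ARMCC::AL doubles as the "unhandled" answer here.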

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
                  .addReg(AddrReg));

  const IndirectBrInst *IB = cast<IndirectBrInst>(I);
  for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]);

  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcEVT = TLI.getValueType(Ty, true);
  if (!SrcEVT.isSimple()) return false;
  MVT SrcVT = SrcEVT.getSimpleVT();

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
      // than a cmn, because there is no way to represent 2147483648 as a
      // signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
    // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  if (!UseImm) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(CmpOpc))
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
      .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ?
    (const TargetRegisterClass*)&ARM::rGPRRegClass :
    (const TargetRegisterClass*)&ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  // ARMEmitCmp emits an FMSTAT when necessary, so it's always safe to use CPSR.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(ARM::CPSR);

  UpdateValueMap(I, DestReg);
  return true;
}
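
// Editor's note (illustrative sketch, an assumption rather than original
// code): for "icmp eq i32 %a, %b" SelectCmp above emits roughly
//   CMP    %a, %b
//   rD = 0                        ; via TargetMaterializeConstant
//   rD = MOVCCi rD, #1, EQ, CPSR  ; rD := 1 if EQ held, else keeps 0
// leaving a 0/1 value in rD for UpdateValueMap.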
1568 if (!Subtarget->hasVFP2()) return false; 1569 1570 Value *V = I->getOperand(0); 1571 if (!(I->getType()->isFloatTy() && 1572 V->getType()->isDoubleTy())) return false; 1573 1574 unsigned Op = getRegForValue(V); 1575 if (Op == 0) return false; 1576 1577 unsigned Result = createResultReg(&ARM::SPRRegClass); 1578 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1579 TII.get(ARM::VCVTSD), Result) 1580 .addReg(Op)); 1581 UpdateValueMap(I, Result); 1582 return true; 1583} 1584 1585bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) { 1586 // Make sure we have VFP. 1587 if (!Subtarget->hasVFP2()) return false; 1588 1589 MVT DstVT; 1590 Type *Ty = I->getType(); 1591 if (!isTypeLegal(Ty, DstVT)) 1592 return false; 1593 1594 Value *Src = I->getOperand(0); 1595 EVT SrcEVT = TLI.getValueType(Src->getType(), true); 1596 if (!SrcEVT.isSimple()) 1597 return false; 1598 MVT SrcVT = SrcEVT.getSimpleVT(); 1599 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 1600 return false; 1601 1602 unsigned SrcReg = getRegForValue(Src); 1603 if (SrcReg == 0) return false; 1604 1605 // Handle sign-extension. 1606 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) { 1607 SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32, 1608 /*isZExt*/!isSigned); 1609 if (SrcReg == 0) return false; 1610 } 1611 1612 // The conversion routine works on fp-reg to fp-reg and the operand above 1613 // was an integer, move it to the fp registers if possible. 1614 unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg); 1615 if (FP == 0) return false; 1616 1617 unsigned Opc; 1618 if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS; 1619 else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD; 1620 else return false; 1621 1622 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT)); 1623 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 1624 ResultReg) 1625 .addReg(FP)); 1626 UpdateValueMap(I, ResultReg); 1627 return true; 1628} 1629 1630bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) { 1631 // Make sure we have VFP. 1632 if (!Subtarget->hasVFP2()) return false; 1633 1634 MVT DstVT; 1635 Type *RetTy = I->getType(); 1636 if (!isTypeLegal(RetTy, DstVT)) 1637 return false; 1638 1639 unsigned Op = getRegForValue(I->getOperand(0)); 1640 if (Op == 0) return false; 1641 1642 unsigned Opc; 1643 Type *OpTy = I->getOperand(0)->getType(); 1644 if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS; 1645 else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD; 1646 else return false; 1647 1648 // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg. 1649 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32)); 1650 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 1651 ResultReg) 1652 .addReg(Op)); 1653 1654 // This result needs to be in an integer register, but the conversion only 1655 // takes place in fp-regs. 1656 unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg); 1657 if (IntReg == 0) return false; 1658 1659 UpdateValueMap(I, IntReg); 1660 return true; 1661} 1662 1663bool ARMFastISel::SelectSelect(const Instruction *I) { 1664 MVT VT; 1665 if (!isTypeLegal(I->getType(), VT)) 1666 return false; 1667 1668 // Things need to be register sized for register moves. 
  if (VT != MVT::i32) return false;

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  const TargetRegisterClass *RC;
  if (!UseImm) {
    RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
    if (!isNegativeImm)
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    else
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss, go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
  EVT DestVT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we have a binary operation on a non-legal
  // type and the target independent selector doesn't know how to handle it.
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned Opc;
  switch (ISDOpcode) {
    default: return false;
    case ISD::ADD:
      Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
      break;
    case ISD::OR:
      Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
      break;
    case ISD::SUB:
      Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
      break;
  }

  unsigned SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  // TODO: Often the 2nd operand is an immediate, which can be encoded directly
  // in the instruction, rather than materializing the value in a register.
  unsigned SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(SrcReg1).addReg(SrcReg2));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
  EVT FPVT = TLI.getValueType(I->getType(), true);
  if (!FPVT.isSimple()) return false;
  MVT VT = FPVT.getSimpleVT();

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
                                           bool Return,
                                           bool isVarArg) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    if (!isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    // Fall through to the soft float variant; variadic functions don't
    // use the hard floating point ABI.
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::GHC:
    if (Return)
      llvm_unreachable("Can't return in GHC call convention");
    else
      return CC_ARM_APCS_GHC;
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes,
                                  bool isVarArg) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
                             CCAssignFnForCall(CC, false, isVarArg));

  // Check that we can handle all of the arguments. If we can't, then bail out
  // now before we add code to the MBB.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      continue;
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64 ||
          // TODO: Only handle register args for now.
          !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
        return false;
    } else {
      switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) {
      default:
        return false;
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
        break;
      case MVT::f32:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      case MVT::f64:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      }
    }
  }

  // At this point, we know we can handle all of the call's arguments in
  // fast isel.

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
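  // Each argument is either copied into its assigned physical register or
  // stored to its assigned stack slot relative to SP, e.g. (roughly) for a
  // fifth i32 argument under AAPCS:
  //   str rN, [sp, #0]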
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
           "We don't handle NEON/vector parameters yet.");

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
        assert(Arg != 0 && "Failed to emit a sext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::AExt:
        // Intentional fall-through.  Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
        assert(Arg != 0 && "Failed to emit a zext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      assert(VA.getLocVT() == MVT::f64 &&
             "Custom lowering for v2f64 args not available");

      CCValAssign &NextVA = ArgLocs[++i];

      assert(VA.isRegLoc() && NextVA.isRegLoc() &&
             "We only handle register args!");

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr);
      (void)EmitRet;
      assert(EmitRet && "Could not emit a store for argument!");
    }
  }

  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes, bool isVarArg) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
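      // e.g. (roughly) for an f64 returned in r0/r1 under a soft-float ABI:
      //   vmov dN, r0, r1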
      MVT DestVT = RVLocs[0].getValVT();
      const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      MVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
                   I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
                                                 F.isVarArg()));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVEVT = TLI.getValueType(RV->getType());
    if (!RVEVT.isSimple()) return false;
    MVT RVVT = RVEVT.getSimpleVT();
    MVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      // Perform extension if flagged as either zext or sext.  Otherwise, do
      // nothing.
      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
        if (SrcReg == 0) return false;
      }
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy.  This is very unlikely.
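    // (The common case is a plain GPR-to-GPR copy into R0, e.g. "mov r0, rN".)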
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
  if (UseReg)
    return isThumb2 ? ARM::tBLXr : ARM::BLX;
  else
    return isThumb2 ? ARM::tBL : ARM::BL;
}

unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
  GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
                                       GlobalValue::ExternalLinkage, 0, Name);
  EVT LCREVT = TLI.getValueType(GV->getType());
  if (!LCREVT.isSimple()) return 0;
  return ARMMaterializeGV(GV, LCREVT.getSimpleVT());
}

// A quick function that will emit a call for a named libcall in F with the
// vector of passed arguments for the Instruction in I. We can assume that we
// can emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, false))
    return false;

  unsigned CalleeReg = 0;
  if (EnableARMLongCalls) {
    CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
    if (CalleeReg == 0) return false;
  }

  // Issue the call.
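  // For a normal (non-long) call this is a direct BL to the runtime routine,
  // e.g. (roughly, the exact name depends on the ABI) for an i32 signed
  // division:
  //   bl __divsi3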
  unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(CallOpc));
  // BL / BLX don't take a predicate, but tBL / tBLX do.
  if (isThumb2)
    AddDefaultPred(MIB);
  if (EnableARMLongCalls)
    MIB.addReg(CalleeReg);
  else
    MIB.addExternalSymbol(TLI.getLibcallName(Call));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i], RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = 0) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Allow SelectionDAG isel to handle tail calls.
  if (CI->isTailCall()) return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  bool isVarArg = FTy->isVarArg();

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
      RetVT != MVT::i16 && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  unsigned arg_size = CS.arg_size();
  Args.reserve(arg_size);
  ArgRegs.reserve(arg_size);
  ArgVTs.reserve(arg_size);
  ArgFlags.reserve(arg_size);
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last two arguments, which shouldn't be passed to the underlying
    // function.
    if (IntrMemName && e-i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
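    // Attributes that change how or where an argument is passed (inreg, sret,
    // nest, byval) are rejected below rather than risk miscompiling the call.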
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, isVarArg))
    return false;

  bool UseReg = false;
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || EnableARMLongCalls) UseReg = true;

  unsigned CalleeReg = 0;
  if (UseReg) {
    if (IntrMemName)
      CalleeReg = getLibcallReg(IntrMemName);
    else
      CalleeReg = getRegForValue(Callee);

    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(UseReg);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(CallOpc));

  // ARM calls don't take a predicate, but tBL / tBLX do.
  if (isThumb2)
    AddDefaultPred(MIB);
  if (UseReg)
    MIB.addReg(CalleeReg);
  else if (!IntrMemName)
    MIB.addGlobalAddress(GV, 0, 0);
  else
    MIB.addExternalSymbol(IntrMemName, 0);

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i], RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
    return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len, unsigned Alignment) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!ARMIsMemCpySmall(Len))
    return false;

  while (Len) {
    MVT VT;
    if (!Alignment || Alignment >= 4) {
      if (Len >= 4)
        VT = MVT::i32;
      else if (Len >= 2)
        VT = MVT::i16;
      else {
        assert(Len == 1 && "Expected a length of 1!");
        VT = MVT::i8;
      }
    } else {
      // Bound based on alignment.
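      // e.g. an 8-byte copy with 2-byte alignment is emitted as four halfword
      // load/store pairs rather than two word-sized ones.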
      if (Len >= 2 && Alignment == 2)
        VT = MVT::i16;
      else {
        assert(Alignment == 1 && "Expected an alignment of 1!");
        VT = MVT::i8;
      }
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits()/8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}

bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
    MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
    MFI->setFrameAddressIsTaken(true);

    unsigned LdrOpc;
    const TargetRegisterClass *RC;
    if (isThumb2) {
      LdrOpc = ARM::t2LDRi12;
      RC = (const TargetRegisterClass*)&ARM::tGPRRegClass;
    } else {
      LdrOpc = ARM::LDRi12;
      RC = (const TargetRegisterClass*)&ARM::GPRRegClass;
    }

    const ARMBaseRegisterInfo *RegInfo =
      static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo());
    unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
    unsigned SrcReg = FramePtr;

    // Recursively load frame address
    //   ldr r0, [fp]
    //   ldr r0, [r0]
    //   ldr r0, [r0]
    //   ...
    unsigned DestReg;
    unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
    while (Depth--) {
      DestReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(LdrOpc), DestReg)
                      .addReg(SrcReg).addImm(0));
      SrcReg = DestReg;
    }
    UpdateValueMap(&I, SrcReg);
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress.  Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpy's are common enough that we want to do them without a
      // call if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        unsigned Alignment = MTI.getAlignment();
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
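    // Note that unlike memcpy, memset has no small-size inline expansion
    // here; any memset we handle becomes a call to the "memset" libcall.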
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  case Intrinsic::trap: {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::TRAP));
    return true;
  }
  }
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}

unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::i32);
  switch (SrcVT.SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
      Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
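  // e.g. a zext from i8 to i32 becomes a single "uxtb" (and an i1 zext an
  // "and ..., #1"), per ARMEmitIntExt above.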
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  EVT SrcEVT, DestEVT;
  SrcEVT = TLI.getValueType(SrcTy, true);
  DestEVT = TLI.getValueType(DestTy, true);
  if (!SrcEVT.isSimple()) return false;
  if (!DestEVT.isSimple()) return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();
  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectShift(const Instruction *I,
                              ARM_AM::ShiftOpc ShiftTy) {
  // Thumb2 mode is handled by the target-independent selector or by
  // SelectionDAG ISel.
  if (isThumb2)
    return false;

  // Only handle i32 now.
  EVT DestVT = TLI.getValueType(I->getType(), true);
  if (DestVT != MVT::i32)
    return false;

  unsigned Opc = ARM::MOVsr;
  unsigned ShiftImm;
  Value *Src2Value = I->getOperand(1);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
    ShiftImm = CI->getZExtValue();

    // Fall back to selection DAG isel if the shift amount
    // is zero or greater than the width of the value type.
    if (ShiftImm == 0 || ShiftImm >= 32)
      return false;

    Opc = ARM::MOVsi;
  }

  Value *Src1Value = I->getOperand(0);
  unsigned Reg1 = getRegForValue(Src1Value);
  if (Reg1 == 0) return false;

  unsigned Reg2 = 0;
  if (Opc == ARM::MOVsr) {
    Reg2 = getRegForValue(Src2Value);
    if (Reg2 == 0) return false;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  if (ResultReg == 0) return false;

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg)
                            .addReg(Reg1);

  if (Opc == ARM::MOVsi)
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
  else if (Opc == ARM::MOVsr) {
    MIB.addReg(Reg2);
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
  }

  AddOptionalDefs(MIB);
  UpdateValueMap(I, ResultReg);
  return true;
}

// TODO: SoftFP support.
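// TargetSelectInstruction is the FastISel entry point for this target: it
// dispatches on the IR opcode and returns false for anything it can't handle,
// in which case the caller falls back to SelectionDAG for that instruction.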
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    case Instruction::Shl:
      return SelectShift(I, ARM_AM::lsl);
    case Instruction::LShr:
      return SelectShift(I, ARM_AM::lsr);
    case Instruction::AShr:
      return SelectShift(I, ARM_AM::asr);
    default: break;
  }
  return false;
}

/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction.  If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                                const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  //   ldrb r1, [r0]       ldrb r1, [r0]
  //   uxtb r2, r1     =>
  //   mov  r3, r2         mov  r3, r1
  bool isZExt = true;
  switch (MI->getOpcode()) {
    default: return false;
    case ARM::SXTH:
    case ARM::t2SXTH:
      isZExt = false;
    case ARM::UXTH:
    case ARM::t2UXTH:
      if (VT != MVT::i16)
        return false;
      break;
    case ARM::SXTB:
    case ARM::t2SXTB:
      isZExt = false;
    case ARM::UXTB:
    case ARM::t2UXTB:
      if (VT != MVT::i8)
        return false;
      break;
  }
  // See if we can handle this address.
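  // If the address is foldable, the extend is rewritten as an extending load
  // (e.g. ldrb/ldrsb for i8) and the now-dead extend instruction is erased.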
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}

unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
                                     unsigned Align, MVT VT) {
  bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
  ARMConstantPoolConstant *CPV =
    ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  unsigned Opc;
  unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT));
  // Load value.
  if (isThumb2) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg1)
                    .addConstantPoolIndex(Idx));
    Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs;
  } else {
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                            DL, TII.get(ARM::LDRcp), DestReg1)
                    .addConstantPoolIndex(Idx).addImm(0));
    Opc = UseGOTOFF ? ARM::ADDrr : ARM::LDRrs;
  }

  unsigned GlobalBaseReg = AFI->getGlobalBaseReg();
  if (GlobalBaseReg == 0) {
    GlobalBaseReg = MRI.createVirtualRegister(TLI.getRegClassFor(VT));
    AFI->setGlobalBaseReg(GlobalBaseReg);
  }

  unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT));
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(Opc), DestReg2)
                            .addReg(DestReg1)
                            .addReg(GlobalBaseReg);
  if (!UseGOTOFF)
    MIB.addImm(0);
  AddOptionalDefs(MIB);

  return DestReg2;
}

namespace llvm {
  FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
                                const TargetLibraryInfo *libInfo) {
    // Completely untested on non-iOS.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // iOS only for now, and anything but Thumb1 (i.e. ARM or Thumb2 mode).
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only())
      return new ARMFastISel(funcInfo, libInfo);
    return 0;
  }
}